content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Generate a matrix of rounded uniform random values and dump it to
# "fuente.csv" (headerless, comma-separated, latin1 encoding).
#
# Arguments:
#   filas    - number of rows.
#   columnas - number of columns.
#   minimo   - lower bound of the uniform draw (default -1).
#   maximo   - upper bound of the uniform draw (default 50).
generar_matriz <- function(filas, columnas, minimo = -1, maximo = 50) {
  # Draw filas*columnas values in [minimo, maximo], round them, and let
  # matrix() reshape the vector column-wise (the original cbind() wrapper
  # was redundant and has been dropped).
  m <- matrix(round(runif(filas * columnas, minimo, maximo)), ncol = columnas)
  # Write into the project directory, but restore the caller's working
  # directory on exit instead of permanently changing it (the original
  # setwd() leaked the change into the whole session).
  old_wd <- setwd("~/Dropbox/01_ITAM_Ciencia_de_Datos/2do_semestre/ComputoParalelo/Comp_Paralelo/ParalelizarMatrizCovarianzas")
  on.exit(setwd(old_wd), add = TRUE)
  out <- file("fuente.csv", "w", encoding = "latin1")
  on.exit(close(out), add = TRUE)  # close the connection even if the write fails
  write.table(m, out, sep = ",", row.names = FALSE, col.names = FALSE)
}
# Generate a 10 x 100 matrix with values in [-2, 100].
generar_matriz(10, 100, -2, 100)
| /generar_matriz_fuente.R | no_license | adfmb/ParalelizarMatrizCovarianzas | R | false | false | 491 | r | generar_matriz<-function(filas,columnas,minimo=-1,maximo=50){
# (duplicate copy of the generar_matriz body from the dataset dump;
# the function header is fused into the metadata row above)
# Debug values once used for interactive testing:
# filas<-3
# columnas<-5
# minimo=-1
# maximo<-50
# Draw filas*columnas uniform values in [minimo, maximo], round them, and
# reshape into a filas x columnas matrix (cbind here is redundant but harmless).
m<-matrix(cbind(round(runif(filas*columnas,minimo,maximo))),ncol=columnas)
# NOTE(review): setwd() permanently changes the session working directory
# and points at a machine-specific path -- confirm this is intended.
setwd("~/Dropbox/01_ITAM_Ciencia_de_Datos/2do_semestre/ComputoParalelo/Comp_Paralelo/ParalelizarMatrizCovarianzas")
# Write the matrix as a headerless CSV in latin1 encoding.
out <- file("fuente.csv", "w", encoding="latin1")
write.table(m, out, sep=",", row.names=FALSE,col.names = FALSE)
close(out)
}
# Generate a 10 x 100 matrix with values in [-2, 100].
generar_matriz(10,100,-2,100)
|
# Runner script: attach testthat and the package under test, then execute
# the StatRank package's full test suite.
library("testthat")
library("StatRank")
test_package("StatRank")
| /tests/test-all.R | no_license | wzchen/StatRank | R | false | false | 62 | r | library(testthat)
# (duplicate copy; `library(testthat)` is fused into the metadata row above)
# Attach the package under test, then run its full testthat suite.
library(StatRank)
test_package("StatRank")
|
# --- Daily series -----------------------------------------------------------
# Build a daily time series from the even-day flow data (365 obs per cycle).
dailyts <- ts(dayeven$flow, start = 1, frequency = 365)
# Decompose the dataset into seasonal / trend / random components.
components.ts <- decompose(dailyts)
# Observation index for the plots; derived from the series instead of the
# original hard-coded 1:1417 so the plots stay aligned if the data change.
x <- seq_along(dailyts)
par(mfrow = c(4, 1))
plot(x, dailyts, type = "l", ylab = "Daily Time series")
plot(x, components.ts$seasonal, type = "l", col = "blue", ylab = "Seasonality")
plot(x, components.ts$trend, type = "l", col = "green", ylab = "Trend")
plot(x, components.ts$random, type = "l", col = "red", ylab = "Random")
plot(components.ts, col = c("blue", "green", "black", "red"))
# Autocorrelation diagnostics of the raw daily series.
acf(dailyts)
pacf(dailyts)
# Second-order decomposition: decompose the residual component again.
second.comp <- decompose(components.ts$random)
plot(second.comp)

# --- Hourly series ----------------------------------------------------------
hourlyts <- ts(houreven$flow, start = 1, frequency = 365)
first.h.ts <- decompose(hourlyts)
plot(first.h.ts)
# Repeated decompositions of the successive residual components.
second.h.comp <- decompose(first.h.ts$random)
plot(second.h.comp)
third.h.comp <- decompose(second.h.comp$random)
plot(third.h.comp)
fourth.h.comp <- decompose(third.h.comp$random)
plot(fourth.h.comp)

# --- Stationarity checks ----------------------------------------------------
# Kwiatkowski-Phillips-Schmidt-Shin (KPSS) unit-root test: small p-values
# suggest differencing is required to make the trending data stationary.
library("fUnitRoots")
urkpssTest(dailyts, type = c("tau"), lags = c("long"), use.lag = NULL, doplot = TRUE)
tsstationary <- diff(dailyts, differences = 100)
plot(tsstationary)
urkpssTest(hourlyts, type = c("tau"), lags = c("short"), use.lag = NULL, doplot = TRUE)
h.tsstationary <- diff(hourlyts, differences = 1000)
plot(h.tsstationary)
| /sample scripts/Autoregression.R | no_license | AlexisCSP/csec-project | R | false | false | 1,705 | r |
# (duplicate copy of the daily/hourly decomposition script from the dump)
# Daily time series of flow, 365 observations per cycle.
dailyts <- ts(dayeven$flow, start=1, frequency = 365)
# Decompose the dataset into seasonal / trend / random components.
components.ts = decompose(dailyts)
# NOTE(review): hard-coded observation index -- assumes the series has
# exactly 1417 points; the plots below break if the data change length.
x=1:1417
par(mfrow=c(4,1))
plot(x,dailyts,type="l", ylab="Daily Time series")
plot(x,components.ts$seasonal,type="l",col="blue", ylab="Seasonality")
plot(x,components.ts$trend,type="l", col="green", ylab="Trend")
plot(x,components.ts$random,type="l", col="red", ylab="Random")
plot(components.ts, col =c("blue","green","black","red"))
# Autocorrelation diagnostics of the raw daily series.
acf(dailyts )
pacf(dailyts )
# Second-order decomposition: decompose the residual component again.
second.comp = decompose(components.ts$random)
plot(second.comp)
# Hourly time series of flow.
hourlyts <- ts(houreven$flow, start=1, frequency = 365)
# Decompose the dataset.
first.h.ts = decompose(hourlyts)
plot(first.h.ts)
# Repeated decompositions of the successive residual components.
second.h.comp = decompose(first.h.ts$random)
plot(second.h.comp)
third.h.comp = decompose(second.h.comp$random)
plot(third.h.comp)
fourth.h.comp = decompose(third.h.comp$random)
plot(fourth.h.comp)
# Kwiatkowski-Phillips-Schmidt-Shin (KPSS) unit-root test: small p-values
# suggest differencing is required to make the trending data stationary.
library("fUnitRoots")
urkpssTest(dailyts, type = c("tau"), lags = c("long"),use.lag = NULL, doplot = TRUE)
tsstationary = diff(dailyts , differences=100)
plot(tsstationary)
urkpssTest(hourlyts, type = c("tau"), lags = c("short"),use.lag = NULL, doplot = TRUE)
h.tsstationary = diff(hourlyts , differences=1000)
plot(h.tsstationary)
|
# Scrape a FilmAffinity user's film ratings into output.csv.
# Load required packages: xml2/rvest for HTML scraping, stringr for matching.
library(xml2)
library(rvest)
library(stringr)
# User related information: user id and number of rating pages to parse.
id <- "958982"
pages <- 20
# Preallocate a data frame for the scraped rows (title, director, rating);
# unused rows stay NA and are dropped at the end.
df <- as.data.frame(matrix(data = NA, ncol = 3, nrow = 10000))
# Base URL of the user's ratings listing.
url <- paste("https://www.filmaffinity.com/en/userratings.php?user_id=", id, sep = "")
for (i in 1:pages) {
  # Page i of the listing, ordered by rating date (orderby=4).
  url2 <- paste(url, "&p=", i, "&orderby=4", sep = "")
  rawinfo <- read_html(url2)
  # Read titles, directors, ratings from their CSS classes.
  titles <- html_nodes(rawinfo, ".mc-title")
  directors <- html_nodes(rawinfo, ".mc-director")
  ratings <- html_nodes(rawinfo, ".ur-mr-rat-img")
  for (j in 1:length(directors)) {
    # Title & director parsing is straightforward by navigating the node tree.
    title <- xml_attrs(xml_child(titles[[j]], 1))[["title"]]
    # Strip ALL commas: the CSV below is written with quote=FALSE, so any
    # comma left in a field would corrupt the row.  The original sub()
    # removed only the first comma; the director field was not cleaned at all.
    title <- gsub(",", "", title)
    director <- xml_attrs(xml_child(xml_child(xml_child(directors[[j]], 1), 1), 1))[["title"]]
    director <- gsub(",", "", director)
    # Rating parsing is trickier: parsing the rating itself does not return
    # anything, but the star-image URL encodes the value
    # (e.g. "/imgs/myratings/7.png" -> 7).
    rating <- sub("/imgs/myratings/", "", xml_attrs(xml_child(ratings[[j]], 1))[["src"]])
    rating <- sub(".png", "", rating)
    rating <- sub("_", "", rating)
    rating <- as.integer(rating)
    # Store the row (30 listing entries per page) unless it's a TV show.
    if (!str_detect(title, "TV")) df[(i - 1) * 30 + j, ] <- c(title, director, rating)
  }
}
# Name the columns and drop the unused (all-NA) rows.
colnames(df) <- c("Title", "Directors", "Rating10")
df <- na.omit(df)
# Save into .csv file.
write.csv(df, file = "output.csv", row.names = FALSE, quote = FALSE)
| /fa-to-csv.R | no_license | malmriv/r-lab | R | false | false | 1,708 | r | #Load required packages
# (duplicate copy of the FilmAffinity scraper from the dump; the leading
# "#Load required packages" comment is fused into the metadata row above)
# xml2/rvest for HTML scraping, stringr for string matching.
library(xml2)
library(rvest)
library(stringr)
# Read user related information: user id and number of pages to parse.
id = "958982"
pages = 20
# Preallocated data frame for the scraped rows (title, director, rating);
# unused rows stay NA and are dropped at the end.
df = as.data.frame(matrix(data=NA,ncol=3,nrow=10000))
# Base URL of the user's ratings listing.
url = paste("https://www.filmaffinity.com/en/userratings.php?user_id=",id,sep="")
for(i in 1:pages) {
# Page i of the listing, ordered by rating date (orderby=4).
url2 = paste(url,"&p=",i,"&orderby=4",sep="")
rawinfo = read_html(url2)
# Read titles, directors, ratings from their CSS classes.
titles = html_nodes(rawinfo, ".mc-title")
directors = html_nodes(rawinfo, ".mc-director")
ratings = html_nodes(rawinfo, ".ur-mr-rat-img")
for(j in 1:length(directors)) {
# Title & director parsing is straightforward by navigating the node tree.
title = xml_attrs(xml_child(titles[[j]], 1))[["title"]]
# NOTE(review): sub() removes only the FIRST comma; since the CSV below is
# written with quote=F, a remaining comma would corrupt the row.
title = sub(",","",title)
director = xml_attrs(xml_child(xml_child(xml_child(directors[[j]], 1), 1), 1))[["title"]]
# Rating parsing is trickier: parsing the rating itself does not return
# anything, but the star-image URL encodes the value
# (e.g. "/imgs/myratings/7.png" -> 7).
rating = sub("/imgs/myratings/","",xml_attrs(xml_child(ratings[[j]], 1))[["src"]])
rating = sub(".png","",rating)
rating = sub("_","",rating)
rating = as.integer(rating)
# Add this info to the data frame (30 rows per page) unless it's a TV show.
if(!str_detect(title,"TV")) df[(i-1)*30+j,] = c(title,director,rating)
}
}
# Change name of the columns and clean the data frame.
colnames(df) = c("Title","Directors","Rating10")
df = na.omit(df)
# Save into .csv file.
write.csv(df,file="output.csv",row.names=F,quote=F)
|
# air_speed(): estimate a flying bird's airspeed at every GPS fix by
# combining its ground speed with a single site-level wind estimate
# (wind-triangle decomposition).
#
# Arguments:
#   GPS.data    - matrix with exactly two columns named "lon" and "lat",
#                 one row per fix (validated below).
#   wind.direct - wind direction as one of the 16 compass points in `dir`.
#   wind.speed  - wind speed; assumed to share units with grnspeed -- TODO confirm.
#   grnspeed    - vector of ground speeds, one per row of GPS.data
#                 (length is not validated; assumed to match).
#   plot        - if TRUE, plot ground speed with the derived airspeed in red.
#   return.support.cross - if TRUE, return list(air.speed, support, cross,
#                 wind.dir) instead of just the airspeed vector.
#
# Depends on get_heading(), defined elsewhere in this package.
air_speed = function( GPS.data ,
wind.direct,
wind.speed,
grnspeed,
plot = F,
return.support.cross = F){
# Debug assignments once used for interactive testing; kept for reference.
# GPS.data = data[,1:2,j]
# wind.direct = md$wind.direct.site
# wind.speed = md$wind.speed.site
# plot = T
# grnspeed = speed
# return.support.cross = T
# 16-point compass rose; this ordering must match the angle sequence used
# to build dir.circ below.
dir = c("S","SSW","SW","WSW","W","WNW","NW","NNW","N","NNE","NE","ENE","E","ESE","SE","SSE")
# Input validation.
if ( ! wind.direct %in% dir){
stop( "wind dir must be one of the following c('S','SSW','SW','WSW','W','WNW','NW','NNW','N','NNE','NE','ENE', 'E','ESE','SE','SSE')")
}
if( ncol(GPS.data) != 2 | dimnames(GPS.data)[[2]][1] != "lon"){
stop( "GPS data must be a matrix with column names 'lon' and 'lat' ")
}
# Work on a data frame copy of the fixes.
data = as.data.frame(GPS.data)
# Heading (radians) of each fix, derived from consecutive positions.
data$head = get_heading(data[,1], data[,2], indivs = 1)
# Map each compass point onto an angle in [-pi, pi); "S" maps to -pi.
# NOTE(review): this convention is presumably shared with get_heading() --
# confirm.
dir.circ = data.frame ( circ = seq(-pi,pi-(pi/16),length.out=16),
dir)
wind.dir = dir.circ$circ[which(dir.circ$dir == wind.direct)]
# Decompose the wind into a component along the heading ("support",
# tailwind positive) and one perpendicular to it ("cross"), then solve the
# wind triangle for each fix.
air.speed = rep(NA, nrow(data))
cross=rep(NA,nrow(data))
support=rep(NA,nrow(data))
for (i in 1:nrow(data)){
# Signed angle from the bird's heading to the wind direction.
diff =wind.dir - data$head[i]
# Magnitude of that angle wrapped onto [0, pi].
x = abs(atan2(sin(diff), cos(diff)))
# Normalise diff into (-pi, pi] so its sign tells which side the wind is on.
diff = ifelse ( diff < -pi, diff+2*pi,diff)
diff = ifelse ( diff > pi , diff-2*pi,diff)
if(!is.na(x)){
if ( abs(x) > pi/2){ # wind from ahead: support (tailwind) is negative
# Re-express the angle relative to the perpendicular, so sin/cos swap
# roles versus the tailwind branch below.
x = abs(x) - (pi/2)
if ( sign(diff) == 1 ){ # if cross is positive
cross[i] = abs(cos(x)) * wind.speed
support[i] = -abs(sin(x)) * wind.speed
} else{ # if cross is negative
cross[i] = -abs(cos(x)) * wind.speed
support[i] = -abs(sin(x)) * wind.speed
}
} else{ # wind from behind: support (tailwind) is positive
if( sign(diff) == 1){ # if cross is positive
cross[i] = abs(sin(x)) * wind.speed
support[i]= abs(cos(x)) * wind.speed
} else{ # if cross is negative
cross[i] = -abs(sin(x)) * wind.speed
support[i]= abs(cos(x)) * wind.speed
}
}
# Wind triangle: airspeed is the magnitude of the ground velocity minus
# the wind vector, expressed in heading coordinates.
air.speed[i] = sqrt ( (grnspeed[i] - support[i])^2 + cross[i]^2)
} else{ # heading undefined for this fix: airspeed unknown
air.speed[i] = NA
}
}
# Optional diagnostic plot: ground speed (black) vs airspeed (red).
if(plot){
plot(grnspeed,type = "l")
lines(air.speed,col="red")
}
# Return either the full decomposition or just the airspeed vector.
if( return.support.cross){
return(list(air.speed,support,cross,wind.dir))
} else{
return(air.speed)
}
}
| /R/air_speed.R | no_license | sankeydan/robofalcon | R | false | false | 2,507 | r |
# (duplicate copy of air_speed from the dataset dump)
# air_speed(): estimate a flying bird's airspeed at every GPS fix by
# combining its ground speed with a single site-level wind estimate
# (wind-triangle decomposition).
#
# Arguments:
#   GPS.data    - matrix with exactly two columns named "lon" and "lat".
#   wind.direct - wind direction as one of the 16 compass points in `dir`.
#   wind.speed  - wind speed; assumed to share units with grnspeed -- TODO confirm.
#   grnspeed    - vector of ground speeds, one per row of GPS.data.
#   plot        - if TRUE, plot ground speed with the derived airspeed in red.
#   return.support.cross - if TRUE, return list(air.speed, support, cross,
#                 wind.dir) instead of just the airspeed vector.
#
# Depends on get_heading(), defined elsewhere in this package.
air_speed = function( GPS.data ,
wind.direct,
wind.speed,
grnspeed,
plot = F,
return.support.cross = F){
# Debug assignments once used for interactive testing; kept for reference.
# GPS.data = data[,1:2,j]
# wind.direct = md$wind.direct.site
# wind.speed = md$wind.speed.site
# plot = T
# grnspeed = speed
# return.support.cross = T
# 16-point compass rose; this ordering must match the angle sequence used
# to build dir.circ below.
dir = c("S","SSW","SW","WSW","W","WNW","NW","NNW","N","NNE","NE","ENE","E","ESE","SE","SSE")
# Input validation.
if ( ! wind.direct %in% dir){
stop( "wind dir must be one of the following c('S','SSW','SW','WSW','W','WNW','NW','NNW','N','NNE','NE','ENE', 'E','ESE','SE','SSE')")
}
if( ncol(GPS.data) != 2 | dimnames(GPS.data)[[2]][1] != "lon"){
stop( "GPS data must be a matrix with column names 'lon' and 'lat' ")
}
# Work on a data frame copy of the fixes.
data = as.data.frame(GPS.data)
# Heading (radians) of each fix, derived from consecutive positions.
data$head = get_heading(data[,1], data[,2], indivs = 1)
# Map each compass point onto an angle in [-pi, pi); "S" maps to -pi.
# NOTE(review): this convention is presumably shared with get_heading() --
# confirm.
dir.circ = data.frame ( circ = seq(-pi,pi-(pi/16),length.out=16),
dir)
wind.dir = dir.circ$circ[which(dir.circ$dir == wind.direct)]
# Decompose the wind into a component along the heading ("support",
# tailwind positive) and one perpendicular to it ("cross"), then solve the
# wind triangle for each fix.
air.speed = rep(NA, nrow(data))
cross=rep(NA,nrow(data))
support=rep(NA,nrow(data))
for (i in 1:nrow(data)){
# Signed angle from the bird's heading to the wind direction.
diff =wind.dir - data$head[i]
# Magnitude of that angle wrapped onto [0, pi].
x = abs(atan2(sin(diff), cos(diff)))
# Normalise diff into (-pi, pi] so its sign tells which side the wind is on.
diff = ifelse ( diff < -pi, diff+2*pi,diff)
diff = ifelse ( diff > pi , diff-2*pi,diff)
if(!is.na(x)){
if ( abs(x) > pi/2){ # wind from ahead: support (tailwind) is negative
# Re-express the angle relative to the perpendicular, so sin/cos swap
# roles versus the tailwind branch below.
x = abs(x) - (pi/2)
if ( sign(diff) == 1 ){ # if cross is positive
cross[i] = abs(cos(x)) * wind.speed
support[i] = -abs(sin(x)) * wind.speed
} else{ # if cross is negative
cross[i] = -abs(cos(x)) * wind.speed
support[i] = -abs(sin(x)) * wind.speed
}
} else{ # wind from behind: support (tailwind) is positive
if( sign(diff) == 1){ # if cross is positive
cross[i] = abs(sin(x)) * wind.speed
support[i]= abs(cos(x)) * wind.speed
} else{ # if cross is negative
cross[i] = -abs(sin(x)) * wind.speed
support[i]= abs(cos(x)) * wind.speed
}
}
# Wind triangle: airspeed is the magnitude of the ground velocity minus
# the wind vector, expressed in heading coordinates.
air.speed[i] = sqrt ( (grnspeed[i] - support[i])^2 + cross[i]^2)
} else{ # heading undefined for this fix: airspeed unknown
air.speed[i] = NA
}
}
# Optional diagnostic plot: ground speed (black) vs airspeed (red).
if(plot){
plot(grnspeed,type = "l")
lines(air.speed,col="red")
}
# Return either the full decomposition or just the airspeed vector.
if( return.support.cross){
return(list(air.speed,support,cross,wind.dir))
} else{
return(air.speed)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/freq.r
\name{freq.data.frame}
\alias{freq.data.frame}
\title{Compute frequencies (data.frame input)}
\usage{
freq.data.frame(df, showNA = c("no", "ifany", "always"), total = FALSE,
digits = 2, label = FALSE)
}
\arguments{
\item{df}{data.frame}
\item{showNA}{showNA}
\item{total}{total}
\item{digits}{digits}
\item{label}{label}
}
\description{
Compute frequencies (data.frame input)
}
\author{
David Hajage
}
\keyword{internal}
| /man/freq.data.frame.Rd | no_license | eusebe/biostat2 | R | false | true | 447 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/freq.r
\name{freq.data.frame}
\alias{freq.data.frame}
\title{Compute frequencies (data.frame input)}
\usage{
freq.data.frame(df, showNA = c("no", "ifany", "always"), total = FALSE,
digits = 2, label = FALSE)
}
\arguments{
\item{df}{data.frame}
\item{showNA}{showNA}
\item{total}{total}
\item{digits}{digits}
\item{label}{label}
}
\description{
Compute frequencies (data.frame input)
}
\author{
David Hajage
}
\keyword{internal}
|
# Packages: pdftools for PDF text extraction, stringr for regex helpers,
# plotly for charts built later in this script.
library(pdftools)
library(stringr)
library(plotly)
# Timestamp of this run, expressed in Hong Kong time, for display later.
currenttime <- as.POSIXct(Sys.time(), tz = Sys.timezone())
attr(currenttime, "tzone") <- "Asia/Hong_Kong"
currenttimetext <- paste0("最後更新於香港時間 ", format(currenttime, "%Y-%m-%d %H:%M"))
# Fetch the latest CHP building-list PDF.
# Remember to manually check if this is the correct separator:
# "List of buildings with confirmed / probable cases visited from 2 days before onset"
download.file("https://www.chp.gov.hk/files/pdf/building_list_eng_20200926.pdf", "../00_original/building_list_eng_20200926.pdf")
# All previously downloaded building-list PDFs.
pdflist <- list.files(path = "../00_original/", pattern = ".pdf")
# Preparation for info extraction -------------------------------------------
# Hong Kong districts, and an empty accumulator for the two-week numbers.
district <- read.csv("../00_original/district-population.csv")
master2wk <- data.frame(district_en = NULL, day = NULL, case = NULL)
##########################
# actual info extraction #
##########################
# The CHP building-list PDF changed its layout several times during 2020, so
# each "era" of files needs a different pre-processing step before district
# names can be counted.  The per-district counting itself is identical for
# every era, so it lives in the helpers below, and the eras are driven from a
# small configuration list instead of one copy-pasted loop per era
# (the original repeated the same 30-line loop ~14 times).

# Regex matching one district name right after a line break.  "Sha Tin" and
# "Central & Western" appear under several spellings in the PDFs.
district_pattern <- function(name) {
  if (name == "Sha Tin") {
    core <- "(Shatin|Sha Tin)"
  } else if (name == "Central & Western") {
    core <- "(Central|Central & Western)"
  } else {
    core <- name
  }
  paste("\\\n[ ]*", core, sep = "")
}

# Count how many listed buildings fall in each district of `temptext` and
# append one row (district_en, day, case) per district to the accumulator
# data frame `acc`; returns the grown accumulator.
count_districts <- function(temptext, tempdate, acc) {
  for (i_district in 1:dim(district)[1]) {
    targetdistrict <- district_pattern(as.character(district$district_en[i_district]))
    # gregexpr() reports -1 for non-matches, so the case count is the
    # number of match positions that are not -1.
    tempcount <- unlist(gregexpr(targetdistrict, temptext))
    numofcase <- sum(tempcount != -1)
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    acc <- rbind(acc, temprow)
  }
  acc
}

# Read one PDF, drop the irrelevant second table according to `trim`, and
# return the global accumulator grown by this file's district counts.
#   trim = "none"      : use the whole document
#          "gsub"      : delete everything matching `marker` on every page
#          "before"    : keep only the pages strictly before the first page
#                        matching `marker`
#          "page_gsub" : keep pages up to and including the first match, then
#                        delete the text from `marker` onwards on that page
# Note: the original used temptext[1:grep(m, temptext)-1], which is
# (1:g)-1 = 0:(g-1) and warned when grep() matched several pages; the
# seq_len()/[1] forms below behave identically without the footguns.
process_pdf <- function(filename, trim, marker = NULL) {
  temptext <- pdf_text(paste("../00_original/", filename, sep = ""))
  tempdate <- format(as.Date(stringr::str_extract(filename, "2020[0-9]+"), "%Y%m%d"))
  if (trim == "gsub") {
    temptext <- gsub(marker, "", temptext)
  } else if (trim == "before") {
    temptext <- temptext[seq_len(grep(marker, temptext)[1] - 1)]
  } else if (trim == "page_gsub") {
    temptext <- temptext[1:grep(marker, temptext)[1]]
    temptext <- gsub(paste(marker, ".*", sep = ""), "", temptext)
  }
  count_districts(temptext, tempdate, master2wk)
}

# Era 1: building_list_eng_20200123_184843.pdf (1) to
# building_list_eng_20200212_202406.pdf (18) -- single-table PDFs.
for (i_file in 1:18) {
  master2wk <- process_pdf(pdflist[i_file], "none")
}
table(master2wk$day)  # quick sanity check of the first era

# Remaining eras: the file-index range plus the trimming that era's layout
# needs (see the original per-era comments for the exact file names).
eras <- list(
  # 19-22: remove the "non-residential building" table text on every page
  list(files = 19:22, trim = "gsub",
       marker = "\n List of non-residential building with 2 or more confirmed cases of novel coronavirus infection\n.*"),
  # 23: keep pages before the non-residential table
  list(files = 23, trim = "before",
       marker = "List of non-residential buildings with confirmed cases visited after onset of symptoms"),
  # 24-25
  list(files = 24:25, trim = "before",
       marker = "List of buildings with confirmed cases visited after onset of symptoms or"),
  # 26-27
  list(files = 26:27, trim = "before",
       marker = "List of buildings with probable/confirmed cases visited after onset of symptoms"),
  # 28
  list(files = 28, trim = "before",
       marker = "List of buildings with probable/confirmed cases visited from 2 days before onset of symptoms"),
  # 29-45: second table may start mid-page, so trim the matching page too
  list(files = 29:45, trim = "page_gsub",
       marker = "List of buildings with confirmed cases visited from 2 days before onset of symptoms"),
  # 46-61
  list(files = 46:61, trim = "before",
       marker = "List of buildings with confirmed / probable cases visited from 2 days"),
  # 62-91
  list(files = 62:91, trim = "before",
       marker = "List of buildings with confirmed / probable cases visited"),
  # 92-94
  list(files = 92:94, trim = "page_gsub",
       marker = "List of buildings visited by confirmed / probable cases"),
  # 95-164 (title case changed)
  list(files = 95:164, trim = "page_gsub",
       marker = "List of Buildings Visited by Confirmed / Probable Cases"),
  # 165-217
  list(files = 165:217, trim = "page_gsub",
       marker = "List of buildings with confirmed / probable cases visited from 2 days")
)
for (era in eras) {
  for (i_file in era$files) {
    master2wk <- process_pdf(pdflist[i_file], era$trim, era$marker)
  }
}
# "List of buildings with confirmed / probable cases visited from 2 days before onset"
# from building_list_eng_20200830.pdf == pdflist[218]
# to current
#
# Robustness fixes relative to the other extraction loops:
# * 218:length(pdflist) would count DOWN (217, 216, ...) and index out of
#   range if fewer than 218 PDFs were present; the range is built
#   defensively so the loop simply does not run in that case.
# * seq_len(nrow(district)) instead of 1:dim(district)[1] (safe on 0 rows).
file_range <- seq_len(length(pdflist))
file_range <- file_range[file_range >= 218]
for (i_file in file_range){
  # loop through each target file, also retrieve date from file name
  # (file names embed the report date as "YYYYMMDD")
  targetfile <- paste0("../00_original/", pdflist[i_file])
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table:
  # step 1, keep pages up to the first page carrying that table's title
  # (grep(...)[1] makes the "first match" choice explicit and avoids the
  # warning 1:grep(...) emits when the title appears on several pages);
  # step 2, blank out everything from the title onwards on that page.
  cutpage <- grep("List of buildings with confirmed / probable cases visited from 2 days before onset", temptext)[1]
  temptext <- temptext[1:cutpage]
  temptext <- gsub("List of buildings with confirmed / probable cases visited from 2 days before onset.*", "", temptext)
  # loop through each district
  for (i_district in seq_len(nrow(district))){
    # districts in the PDF always appear at the start of a line, so anchor
    # the pattern on a line break; two districts have variant spellings
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste0("\\\n[ ]*", "(Shatin|Sha Tin)")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste0("\\\n[ ]*", "(Central|Central & Western)")
    } else {
      targetdistrict <- paste0("\\\n[ ]*", district$district_en[i_district])
    }
    # gregexpr() yields one vector of match positions per page; a page with
    # no match contributes a single -1, so counting != -1 counts the hits
    tempcount <- unlist(gregexpr(targetdistrict, temptext))
    numofcase <- sum(tempcount != -1)
    # append one (district, day, 14-day count) row to master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
#############################
# done with data extraction #
#############################
# Persist the raw per-district, per-day 14-day counts.
# NOTE(review): row.names=F uses the reassignable shorthand F; FALSE is safer.
write.csv(master2wk, "hk-covid19-2wk.csv", row.names=F)
###################################
# data wrangle / summary for plot #
###################################
# rbind() in the loops above produced character columns; restore proper types
master2wk$case <- as.numeric(as.character(master2wk$case))
master2wk$day <- as.Date(master2wk$day)
# masterday: each 14-day total converted to a daily average per district
masterday <- master2wk
masterday$case <- round(masterday$case/14, 3)
masterday <- merge(masterday, district, by="district_en", all.x=T)
# cases per 100k residents
masterday$case100k <- round(masterday$case / masterday$pop*100000, 2)
# aesthetics
masterday <- masterday[order(masterday$day, masterday$district_en), ]
# NOTE(review): position-based column reordering is fragile; selecting the
# six columns by name would survive changes to district-population.csv.
masterday <- masterday[, c(1, 4, 5, 2, 3, 6)]
# date range, used for the x-axis limits of the charts below
day_earliest <- min(masterday$day)
day_latest <- max(masterday$day)
# find latest PDF name
# extract data for map
pdflist[length(pdflist)]
latestdate <- stringr::str_extract(pdflist[length(pdflist)], "2020[0-9]+")
latestdate <- format(as.Date(latestdate, "%Y%m%d"))
# snapshot of the most recent day only, reshaped for the choropleth map
master2wk_latest <- subset(master2wk, master2wk$day == latestdate)
master2wk_latest <- merge(master2wk_latest, district, by="district_en")
master2wk_latest$case100k <- round(master2wk_latest$case / master2wk_latest$pop * 100000, 3)
master2wk_latest$day <- NULL
master2wk_latest$district_ch <- NULL
master2wk_latest$pop <- NULL
# "District" must match the property name in the boundary GeoJSON merged below
names(master2wk_latest) <- c("District", "case", "case100k")
head(masterday)
#########
# plots #
#########
# fig_day -- line chart of the 14-day average of daily new cases per
# district (titles/labels are in Chinese: "14-day average daily new
# COVID-19 cases"); the date axis is padded by one day on each side.
fig_day <-
  plot_ly() %>%
  add_trace(data=masterday, type="scatter", mode="lines",
            x=~day, y=~case, color=~district_ch) %>%
  layout(title=list(text="14日平均每日新增新型肺炎個案", y=0.99),
         xaxis=list(title="日期", range=c(day_earliest-1, day_latest +1)),
         yaxis=list(title="14日平均新增新型肺炎個案"),
         legend=list(x=0.025, y=0.975))
# fig_100k -- the same chart normalised to cases per 100k residents
fig_100k <-
  plot_ly() %>%
  add_trace(data=masterday, type="scatter", mode="lines",
            x=~day, y=~case100k, color=~district_ch) %>%
  layout(title=list(text="14日平均每日每十萬人新增新型肺炎個案", y=0.99),
         xaxis=list(title="日期", range=c(day_earliest-1, day_latest +1)),
         yaxis=list(title="14日平均每十萬人新增新型肺炎個案"),
         legend=list(x=0.025, y=0.975))
#######
# map #
#######
library(sf)       # reads the district-boundary GeoJSON
library(leaflet)  # interactive choropleth maps
# read, then merge with numbers
districtmap <- st_read("../00_original/hksar_18_district_boundary.json")
districtmap <- merge(districtmap, master2wk_latest, by="District")
# cf https://rstudio.github.io/leaflet/choropleths.html
# Map 1: raw 14-day case counts
bins_raw <- c(0, 1, 2, 5, 10, 15, 70, 100, Inf)
palette_raw <- colorBin("Reds", domain=districtmap$case, bins=bins_raw)
# Hover label: "<district>: N confirmed/probable cases in the past 14 days"
# (districtmap$地區 is presumably the Chinese district-name property of the
# GeoJSON -- confirm against the source file)
label_raw <- sprintf("<strong>%s</strong><br/>過去14日有 %g 宗確診或疑似個案",
                     districtmap$地區, districtmap$case) %>%
  lapply(htmltools::HTML)
map_raw <-
  leaflet() %>%
  setView(114.167265, 22.360296, zoom=10) %>%
  addPolygons(data=districtmap, color="black", weight=2, dashArray="3",
              fillColor=~palette_raw(case), fillOpacity=0.7,
              highlight=highlightOptions(weight=5, dashArray = "", bringToFront = TRUE),
              label=label_raw) %>%
  addProviderTiles(providers$Esri.WorldTopoMap)
# Map 2: 14-day cases per 100k residents
bins_100k <- c(0, 1, 2.5, 5, 10, 25, Inf)
palette_100k <- colorBin("Reds", domain=districtmap$case100k, bins=bins_100k)
label_100k <- sprintf("<strong>%s</strong><br/>過去14日,每十萬人有 %g 宗確診或疑似個案",
                      districtmap$地區, districtmap$case100k) %>%
  lapply(htmltools::HTML)
map_100k <-
  leaflet() %>%
  setView(114.167265, 22.360296, zoom=10) %>%
  addPolygons(data=districtmap, color="black", weight=2, dashArray="3",
              fillColor=~palette_100k(case100k), fillOpacity=0.7,
              highlight=highlightOptions(weight=5, dashArray = "", bringToFront = TRUE),
              label=label_100k) %>%
  addProviderTiles(providers$Esri.WorldTopoMap)
##################
# Make dashboard #
##################
# make dashboard, copy to root directory
# NOTE(review): overwrite=T uses the reassignable shorthand T; prefer TRUE.
rmarkdown::render(input = "index.Rmd")
file.copy("index.html", "../docs/", overwrite=T)
| /2020-09-26/analysis.R | no_license | tszhim-tsui/2020-hk-covid19 | R | false | false | 27,307 | r | library(pdftools)
library(stringr)
library(plotly)
# Get current time for later use
currenttime <- as.POSIXct(Sys.time(), tz=Sys.timezone())
attributes(currenttime)$tzone <- "Asia/Hong_Kong"
currenttimetext <- paste("最後更新於香港時間 ", format(currenttime, "%Y-%m-%d %H:%M"), sep="")
# download latest data file
download.file("https://www.chp.gov.hk/files/pdf/building_list_eng_20200926.pdf", "../00_original/building_list_eng_20200926.pdf")
# remember to manually check if this is the correct separator
# "List of buildings with confirmed / probable cases visited from 2 days before onset"
# read in all pdf names
pdflist <- dir(path="../00_original/", pattern=".pdf")
###################################
# preparation for info extraction #
###################################
# read in hk district
district <- read.csv("../00_original/district-population.csv")
# set up empty df for 2-week numbers
master2wk <- data.frame(district_en = NULL, day = NULL, case = NULL)
##########################
# actual info extraction #
##########################
# Files 1..18: building_list_eng_20200123_184843.pdf to
# building_list_eng_20200212_202406.pdf -- these early PDFs carry a single
# table, so the whole document text is scanned for district names.
for (i_file in 1:18){
  # read one PDF (pdf_text() returns one character string per page) and
  # recover the report date embedded in the file name ("YYYYMMDD")
  targetfile <- paste0("../00_original/", pdflist[i_file])
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # seq_len() instead of 1:dim(district)[1]: the loop body is skipped (not
  # run with index 0) should the district table ever be empty
  for (i_district in seq_len(nrow(district))){
    # districts always appear at the start of a line in the PDF, so anchor
    # the pattern on a line break; two districts have inconsistent spellings
    # across reports, hence the alternations
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste0("\\\n[ ]*", "(Shatin|Sha Tin)")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste0("\\\n[ ]*", "(Central|Central & Western)")
    } else {
      targetdistrict <- paste0("\\\n[ ]*", district$district_en[i_district])
    }
    # gregexpr() yields one vector of match positions per page; a page with
    # no match contributes a single -1, so counting != -1 counts the hits
    tempcount <- unlist(gregexpr(targetdistrict, temptext))
    numofcase <- sum(tempcount != -1)
    # append one (district, day, count) row to master2wk
    # NOTE: growing a data frame with rbind() in a loop is O(n^2); tolerable
    # at this scale (18 files x 18 districts)
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# quick sanity check: one row per district per day (autoprints when sourced)
table(master2wk$day)
# from building_list_eng_20200213_000000.pdf == pdflist[19]
# to building_list_eng_20200216_220021.pdf == pdflist[22]
# need to remove all text after
# "\n List of non-residential building with 2 or more confirmed cases of novel coronavirus infection\n"
for (i_file in 19:22){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # (blank out everything from the second table's title onwards, per page)
  temptext <- gsub("\n List of non-residential building with 2 or more confirmed cases of novel coronavirus infection\n.*", "", temptext)
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# only on building_list_eng_20200217_231207.pdf == pdflist[23]
# (single file whose layout differs: pages BEFORE the second table's title
# page are kept)
targetfile <- paste("../00_original/", pdflist[23], sep="")
temptext <- pdf_text(targetfile)
# NOTE(review): "1:grep(...)-1" parses as "(1:grep(...)) - 1" because ":"
# binds tighter than binary "-", yielding indices 0..(match-1); the 0 is
# silently dropped when subsetting, so this DOES keep pages 1..(match-1),
# but only by accident -- "1:(grep(...) - 1)" was probably intended.
temptext <- temptext[1:grep("List of non-residential buildings with confirmed cases visited after onset of symptoms", temptext)-1]
tempdate <- stringr::str_extract(pdflist[23], "2020[0-9]+")
tempdate <- format(as.Date(tempdate, "%Y%m%d"))
for (i_district in 1:dim(district)[1]){
  # since districts in PDF always appear after a line break
  # set up regex pattern to match
  if (district$district_en[i_district] == "Sha Tin"){
    targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
  } else if (district$district_en[i_district] == "Central & Western") {
    targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
  } else {
    targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
  }
  # grep all matches, then unlist to get matching locations
  tempcount <- gregexpr(targetdistrict, temptext)
  tempcount <- unlist(tempcount)
  # note non-matches are -1
  # get num of district's cases
  numofcase <- sum(tempcount!=-1)
  # cbind required information, then rbind it to the df master2wk
  temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                   day = tempdate,
                   case = numofcase)
  master2wk <- rbind(master2wk, temprow)
}
# from building_list_eng_20200218_215230.pdf == pdflist[24]
# to building_list_eng_20200219_173655.pdf == pdflist[25]
# Same per-district counting as above; only the cutoff title differs.
for (i_file in 24:25){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # ("1:grep(...)-1" keeps the pages before the title page; see the
  # precedence note at the pdflist[23] block)
  temptext <- temptext[1:grep("List of buildings with confirmed cases visited after onset of symptoms or", temptext)-1]
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# from building_list_eng_20200220_224016.pdf == pdflist[26]
# to building_list_eng_20200221_213153.pdf == pdflist[27]
for (i_file in 26:27){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  temptext <- temptext[1:grep("List of buildings with probable/confirmed cases visited after onset of symptoms", temptext)-1]
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# for building_list_eng_20200222_000000.pdf == pdflist[28]
# (single file processed outside the loops; keeps the pages before the
# second table's title page -- see the precedence note at pdflist[23])
targetfile <- paste("../00_original/", pdflist[28], sep="")
temptext <- pdf_text(targetfile)
temptext <- temptext[1:grep("List of buildings with probable/confirmed cases visited from 2 days before onset of symptoms", temptext)-1]
tempdate <- stringr::str_extract(pdflist[28], "2020[0-9]+")
tempdate <- format(as.Date(tempdate, "%Y%m%d"))
for (i_district in 1:dim(district)[1]){
  # since districts in PDF always appear after a line break
  # set up regex pattern to match
  if (district$district_en[i_district] == "Sha Tin"){
    targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
  } else if (district$district_en[i_district] == "Central & Western") {
    targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
  } else {
    targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
  }
  # grep all matches, then unlist to get matching locations
  tempcount <- gregexpr(targetdistrict, temptext)
  tempcount <- unlist(tempcount)
  # note non-matches are -1
  # get num of district's cases
  numofcase <- sum(tempcount!=-1)
  # cbind required information, then rbind it to the df master2wk
  temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                   day = tempdate,
                   case = numofcase)
  master2wk <- rbind(master2wk, temprow)
}
# List of buildings with confirmed cases visited from 2 days before onset of symptoms
# building_list_eng_20200223_225143.pdf == pdflist[29]
# building_list_eng_20200310_225716.pdf == pdflist[45]
# Here the title page itself is kept; the irrelevant text that follows the
# title on that page is blanked out with gsub() instead.
for (i_file in 29:45){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # step 1, remove pages after the beginning of the table
  # step 2, remove text after the title of the irrelevant table
  temptext <- temptext[1:grep("List of buildings with confirmed cases visited from 2 days before onset of symptoms", temptext)]
  temptext <- gsub("List of buildings with confirmed cases visited from 2 days before onset of symptoms.*", "", temptext)
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# List of buildings with confirmed / probable cases visited from 2 days
# from building_list_eng_20200311_223130.pdf == pdflist[46]
# to building_list_eng_20200326_000000.pdf == pdflist[61]
for (i_file in 46:61){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # (keeps the pages before the title page; see precedence note earlier)
  temptext <- temptext[1:grep("List of buildings with confirmed / probable cases visited from 2 days", temptext)-1]
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# "List of buildings with confirmed / probable cases visited"
# from building_list_eng_20200327_234929.pdf == pdflist[62]
# to building_list_eng_20200425_000000.pdf == pdflist[91]
for (i_file in 62:91){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # (keeps the pages before the title page; see precedence note earlier)
  temptext <- temptext[1:grep("List of buildings with confirmed / probable cases visited", temptext)-1]
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# "List of buildings visited by confirmed / probable cases"
# from building_list_eng_20200426_000000.pdf == pdflist[92]
# to building_list_eng_20200428.pdf == pdflist[94]
# Back to the keep-title-page-then-gsub variant.
for (i_file in 92:94){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # step 1, remove pages after the beginning of the table
  # step 2, remove text after the title of the irrelevant table
  temptext <- temptext[1:grep("List of buildings visited by confirmed / probable cases", temptext)]
  temptext <- gsub("List of buildings visited by confirmed / probable cases.*", "", temptext)
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# "List of Buildings Visited by Confirmed / Probable Cases"
# from building_list_eng_20200429.pdf == pdflist[95]
# to building_list_eng_20200707.pdf == pdflist[164]
# (note: the cutoff title is capitalised differently in this date range)
for (i_file in 95:164){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # step 1, remove pages after the beginning of the table
  # step 2, remove text after the title of the irrelevant table
  temptext <- temptext[1:grep("List of Buildings Visited by Confirmed / Probable Cases", temptext)]
  temptext <- gsub("List of Buildings Visited by Confirmed / Probable Cases.*", "", temptext)
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# "List of buildings with confirmed / probable cases visited from 2 days"
# from building_list_eng_20200708.pdf == pdflist[165]
# to building_list_eng_20200829.pdf == pdflist[217]
for (i_file in 165:217){
  # Read the i-th PDF (one character string per page) and pull the report
  # date ("YYYYMMDD") out of its file name.
  targetfile <- paste0("../00_original/", pdflist[i_file])
  temptext <- pdf_text(targetfile)
  tempdate <- format(as.Date(stringr::str_extract(pdflist[i_file], "2020[0-9]+"), "%Y%m%d"))
  # Drop the second (irrelevant) table: keep pages up to the one carrying
  # its title, then blank out everything from the title onwards.
  temptext <- temptext[1:grep("List of buildings with confirmed / probable cases visited from 2 days", temptext)]
  temptext <- gsub("List of buildings with confirmed / probable cases visited from 2 days.*", "", temptext)
  # Count, per district, how many table rows mention it.
  for (i_district in 1:dim(district)[1]){
    districtname <- as.character(district$district_en[i_district])
    # District names sit at the start of a line in the PDF, so anchor the
    # pattern on a line break; two districts have variant spellings.
    targetdistrict <- switch(districtname,
                             "Sha Tin" = "\\\n[ ]*(Shatin|Sha Tin)",
                             "Central & Western" = "\\\n[ ]*(Central|Central & Western)",
                             paste0("\\\n[ ]*", districtname))
    # gregexpr() yields one position vector per page; -1 marks "no match",
    # so the number of entries != -1 is the number of occurrences.
    numofcase <- sum(unlist(gregexpr(targetdistrict, temptext)) != -1)
    # Append one (district, day, 14-day count) row to the running table.
    master2wk <- rbind(master2wk,
                      cbind(district_en = districtname,
                            day = tempdate,
                            case = numofcase))
  }
}
# "List of buildings with confirmed / probable cases visited from 2 days before onset"
# from building_list_eng_20200830.pdf == pdflist[218]
# to current
#
# Robustness fixes relative to the other extraction loops:
# * 218:length(pdflist) would count DOWN (217, 216, ...) and index out of
#   range if fewer than 218 PDFs were present; the range is built
#   defensively so the loop simply does not run in that case.
# * seq_len(nrow(district)) instead of 1:dim(district)[1] (safe on 0 rows).
file_range <- seq_len(length(pdflist))
file_range <- file_range[file_range >= 218]
for (i_file in file_range){
  # loop through each target file, also retrieve date from file name
  # (file names embed the report date as "YYYYMMDD")
  targetfile <- paste0("../00_original/", pdflist[i_file])
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table:
  # step 1, keep pages up to the first page carrying that table's title
  # (grep(...)[1] makes the "first match" choice explicit and avoids the
  # warning 1:grep(...) emits when the title appears on several pages);
  # step 2, blank out everything from the title onwards on that page.
  cutpage <- grep("List of buildings with confirmed / probable cases visited from 2 days before onset", temptext)[1]
  temptext <- temptext[1:cutpage]
  temptext <- gsub("List of buildings with confirmed / probable cases visited from 2 days before onset.*", "", temptext)
  # loop through each district
  for (i_district in seq_len(nrow(district))){
    # districts in the PDF always appear at the start of a line, so anchor
    # the pattern on a line break; two districts have variant spellings
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste0("\\\n[ ]*", "(Shatin|Sha Tin)")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste0("\\\n[ ]*", "(Central|Central & Western)")
    } else {
      targetdistrict <- paste0("\\\n[ ]*", district$district_en[i_district])
    }
    # gregexpr() yields one vector of match positions per page; a page with
    # no match contributes a single -1, so counting != -1 counts the hits
    tempcount <- unlist(gregexpr(targetdistrict, temptext))
    numofcase <- sum(tempcount != -1)
    # append one (district, day, 14-day count) row to master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
#############################
# done with data extraction #
#############################
# Persist the raw per-district, per-day 14-day counts.
# (FALSE spelled out -- F is a reassignable shorthand.)
write.csv(master2wk, "hk-covid19-2wk.csv", row.names=FALSE)
###################################
# data wrangle / summary for plot #
###################################
# rbind() in the loops above produced character columns; restore proper types.
master2wk$case <- as.numeric(as.character(master2wk$case))
master2wk$day <- as.Date(master2wk$day)
# masterday: convert each 14-day total into a daily average per district,
# attach district metadata (Chinese name, population), derive rate per 100k.
masterday <- master2wk
masterday$case <- round(masterday$case/14, 3)
masterday <- merge(masterday, district, by="district_en", all.x=TRUE)
masterday$case100k <- round(masterday$case / masterday$pop*100000, 2)
# aesthetics: sort rows, then order columns by NAME (robust against changes
# in the column order of district-population.csv, unlike numeric indices).
masterday <- masterday[order(masterday$day, masterday$district_en), ]
masterday <- masterday[, c("district_en", "district_ch", "pop", "day", "case", "case100k")]
# date range, used for the x-axis limits of the charts below
day_earliest <- min(masterday$day)
day_latest <- max(masterday$day)
# find latest PDF name, then extract that day's snapshot for the map
pdflist[length(pdflist)]
latestdate <- stringr::str_extract(pdflist[length(pdflist)], "2020[0-9]+")
latestdate <- format(as.Date(latestdate, "%Y%m%d"))
master2wk_latest <- subset(master2wk, master2wk$day == latestdate)
master2wk_latest <- merge(master2wk_latest, district, by="district_en")
master2wk_latest$case100k <- round(master2wk_latest$case / master2wk_latest$pop * 100000, 3)
master2wk_latest$day <- NULL
master2wk_latest$district_ch <- NULL
master2wk_latest$pop <- NULL
# "District" must match the property name used in the boundary GeoJSON.
names(master2wk_latest) <- c("District", "case", "case100k")
head(masterday)
#########
# plots #
#########
# fig_day -- line chart of the 14-day average of daily new cases per
# district (titles/labels are in Chinese: "14-day average daily new
# COVID-19 cases"); the date axis is padded by one day on each side.
fig_day <-
  plot_ly() %>%
  add_trace(data=masterday, type="scatter", mode="lines",
            x=~day, y=~case, color=~district_ch) %>%
  layout(title=list(text="14日平均每日新增新型肺炎個案", y=0.99),
         xaxis=list(title="日期", range=c(day_earliest-1, day_latest +1)),
         yaxis=list(title="14日平均新增新型肺炎個案"),
         legend=list(x=0.025, y=0.975))
# fig_100k -- the same chart normalised to cases per 100k residents
fig_100k <-
  plot_ly() %>%
  add_trace(data=masterday, type="scatter", mode="lines",
            x=~day, y=~case100k, color=~district_ch) %>%
  layout(title=list(text="14日平均每日每十萬人新增新型肺炎個案", y=0.99),
         xaxis=list(title="日期", range=c(day_earliest-1, day_latest +1)),
         yaxis=list(title="14日平均每十萬人新增新型肺炎個案"),
         legend=list(x=0.025, y=0.975))
#######
# map #
#######
library(sf)       # reads the district-boundary GeoJSON
library(leaflet)  # interactive choropleth maps
# read, then merge with numbers
districtmap <- st_read("../00_original/hksar_18_district_boundary.json")
districtmap <- merge(districtmap, master2wk_latest, by="District")
# cf https://rstudio.github.io/leaflet/choropleths.html
# Map 1: raw 14-day case counts
bins_raw <- c(0, 1, 2, 5, 10, 15, 70, 100, Inf)
palette_raw <- colorBin("Reds", domain=districtmap$case, bins=bins_raw)
# Hover label: "<district>: N confirmed/probable cases in the past 14 days"
# (districtmap$地區 is presumably the Chinese district-name property of the
# GeoJSON -- confirm against the source file)
label_raw <- sprintf("<strong>%s</strong><br/>過去14日有 %g 宗確診或疑似個案",
                     districtmap$地區, districtmap$case) %>%
  lapply(htmltools::HTML)
map_raw <-
  leaflet() %>%
  setView(114.167265, 22.360296, zoom=10) %>%
  addPolygons(data=districtmap, color="black", weight=2, dashArray="3",
              fillColor=~palette_raw(case), fillOpacity=0.7,
              highlight=highlightOptions(weight=5, dashArray = "", bringToFront = TRUE),
              label=label_raw) %>%
  addProviderTiles(providers$Esri.WorldTopoMap)
# Map 2: 14-day cases per 100k residents
bins_100k <- c(0, 1, 2.5, 5, 10, 25, Inf)
palette_100k <- colorBin("Reds", domain=districtmap$case100k, bins=bins_100k)
label_100k <- sprintf("<strong>%s</strong><br/>過去14日,每十萬人有 %g 宗確診或疑似個案",
                      districtmap$地區, districtmap$case100k) %>%
  lapply(htmltools::HTML)
map_100k <-
  leaflet() %>%
  setView(114.167265, 22.360296, zoom=10) %>%
  addPolygons(data=districtmap, color="black", weight=2, dashArray="3",
              fillColor=~palette_100k(case100k), fillOpacity=0.7,
              highlight=highlightOptions(weight=5, dashArray = "", bringToFront = TRUE),
              label=label_100k) %>%
  addProviderTiles(providers$Esri.WorldTopoMap)
##################
# Make dashboard #
##################
# make dashboard, copy to root directory
rmarkdown::render(input = "index.Rmd")
file.copy("index.html", "../docs/", overwrite=T)
|
\name{addDescription}
\alias{addDescription}
\title{Adds a description to a 'netCoin' object.}
\description{
\code{addDescription} adds a description to a 'netCoin' object.
}
\value{
A 'netCoin' object.
}
\usage{
addDescription(x, description)
}
\arguments{
\item{x}{A 'netCoin' object.}
\item{description}{the description text.}
}
\author{
Modesto Escobar, Department of Sociology and Communication, University of Salamanca.
}
\examples{
data(finches)
data(Galapagos)
Net<-allNet(Galapagos,nodes=finches, criteria="hyp", maxL=.05,
lwidth ="Haberman",lweight="Haberman",
size="frequency", color="species", layout="mds",
main="Species coincidences in Galapagos Islands",
note="Data source: Sanderson (2000)")
img <- system.file("extdata", "p.Crassirostris.png",
package="netCoin")
Net <- addDescription(Net,"Species coincidences in Galapagos Islands")
\dontrun{
multi <- multigraphCreate(Network=Net)
multiPages(multi,"Graph description example",show=TRUE)
}
}
| /man/addDescription.Rd | no_license | cran/netCoin | R | false | false | 1,043 | rd | \name{addDescription}
\alias{addDescription}
\title{Adds a description to a 'netCoin' object.}
\description{
\code{addDescription} adds a description to a 'netCoin' object.
}
\value{
A 'netCoin' object.
}
\usage{
addDescription(x, description)
}
\arguments{
\item{x}{A 'netCoin' object.}
\item{description}{the description text.}
}
\author{
Modesto Escobar, Department of Sociology and Communication, University of Salamanca.
}
\examples{
data(finches)
data(Galapagos)
Net<-allNet(Galapagos,nodes=finches, criteria="hyp", maxL=.05,
lwidth ="Haberman",lweight="Haberman",
size="frequency", color="species", layout="mds",
main="Species coincidences in Galapagos Islands",
note="Data source: Sanderson (2000)")
img <- system.file("extdata", "p.Crassirostris.png",
package="netCoin")
Net <- addDescription(Net,"Species coincidences in Galapagos Islands")
\dontrun{
multi <- multigraphCreate(Network=Net)
multiPages(multi,"Graph description example",show=TRUE)
}
}
|
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85509427630443e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) | /dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609866702-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 831 | r | testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85509427630443e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) |
/mon78abd_spict/Model_V0.R | no_license | MERVEX-group/Spict-IMPRESS | R | false | false | 2,746 | r | ||
/R/R_basic/chapter05.R | no_license | kimsang-kyu/public_bigdata_internship | R | false | false | 2,028 | r | ||
get_weather <- function(station = "FQMA", # CDG, BGT, ATL, JFK
                        start_year = 2000,
                        end_year = 2016,
                        plot = FALSE,
                        save = TRUE,
                        load_saved = TRUE){
  # Download daily weather summaries for an airport weather station from the
  # (legacy) Weather Underground CSV endpoint, one HTTP request per year, and
  # return them as a single data frame with standardized column names.
  #
  # Args:
  #   station:    airport code, e.g. "FQMA", "CDG", "JFK".
  #   start_year: first year to fetch (each yearly request starts on Jan 1).
  #   end_year:   last year to fetch; the window is capped at yesterday.
  #   plot:       currently unused; kept for interface compatibility.
  #   save:       if TRUE, cache the assembled data frame to an .RData file
  #               in the working directory.
  #   load_saved: if TRUE and the cache file exists, load it (the cache
  #               stores an object named `x`) instead of re-downloading.
  #
  # Returns: a data.frame with one row per day and columns date,
  #   temp_max/mean/min, humidity_max/mean/min, precipitation, cloud_cover,
  #   location.
  require(data.table)
  # Cache file name encodes the station and requested year range
  file_name <- paste0('weather_',
                      station,
                      '_',
                      start_year,
                      '_',
                      end_year,
                      '.RData')
  if (load_saved && file_name %in% dir()){
    load(file_name) # restores `x` saved by a previous call
  } else {
    # Format station name for the URL (encode spaces, upper-case the code)
    station <- toupper(gsub(" ", "%20", station))
    # Clamp the requested window to [start_year-01-01, yesterday]
    start_date <- as.Date(paste0(start_year, '-01-01'))
    end_date <- as.Date(paste0(end_year, '-12-31'))
    if (end_date > Sys.Date()){
      end_date <- Sys.Date() - 1
    }
    # Parse date components
    start_day <- as.numeric(format(start_date, "%d"))
    start_month <- as.numeric(format(start_date, "%m"))
    start_year <- as.numeric(format(start_date, "%Y"))
    end_day <- as.numeric(format(end_date, "%d"))
    end_month <- as.numeric(format(end_date, "%m"))
    end_year <- as.numeric(format(end_date, "%Y"))
    # Get years
    years <- start_year:end_year
    # One download per year; failures for individual years are skipped by
    # try(), leaving a NULL slot that rbind ignores.
    results_list <- vector("list", length(years))
    for (i in seq_along(years)){
      try({
        this_year <- years[i]
        # Every yearly request starts on Jan 1
        this_start_month <- 1
        this_start_day <- 1
        if (this_year == end_year){
          this_end_month <- as.numeric(format(end_date, '%m'))
          # BUG FIX: the end *day* was previously read with format '%m'
          # (the month number), silently truncating the final year's data.
          this_end_day <- as.numeric(format(end_date, '%d'))
        } else {
          this_end_month <- 12
          this_end_day <- 31
        }
        # Define link format for airports
        link <- paste0("http://www.wunderground.com/history/airport/",
                       station,
                       "/", this_year,
                       "/", this_start_month,
                       "/", this_start_day,
                       "/CustomHistory.html?dayend=", this_end_day,
                       "&monthend=", this_end_month,
                       "&yearend=", this_year,
                       "&req_city=NA&req_state=NA&req_statename=NA&format=1")
        # Read in data from link
        df <- suppressWarnings(fread(link))
        names_df <- names(df)
        df <- data.frame(df)
        names(df) <- names_df
        # Keep only the first 21 columns (through cloud cover)
        df <- df[, 1:21]
        # Fix date
        names(df)[1] <- 'date'
        df$date <- as.Date(df$date, format = '%Y-%m-%d')
        # Standardize remaining names (lower case, underscores)
        names(df) <-
          tolower(gsub(' |[/]', '_', names(df)))
        # Drop columns we do not need
        df <- df[, !grepl('sea_level|visibility|wind|gust|dew', names(df))]
        # Standardize names
        names(df) <- c("date",
                       "temp_max",
                       "temp_mean",
                       "temp_min",
                       "humidity_max",
                       "humidity_mean",
                       "humidity_min",
                       "precipitation",
                       "cloud_cover")
        # Add a location column
        df$location <- toupper(as.character(station))
        # Report progress per year
        message(paste0('Data retrieved for ', this_year))
        # Stick results into list
        results_list[[i]] <- df
      })
    }
    # Bind together results
    x <- do.call('rbind', results_list)
  }
  # Save if applicable
  if (save){
    save(x, file = file_name)
  }
  return(x)
} | /get_weather.R | no_license | joebrew/weather | R | false | false | 3,762 | r | get_weather <- function(station = "FQMA", # CDG, BGT, ATL, JFK
start_year = 2000,
end_year = 2016,
plot = FALSE,
save = TRUE,
load_saved = TRUE){
require(data.table)
# Define a filename
file_name <- paste0('weather_',
station,
'_',
start_year,
'_',
end_year,
'.RData')
if(load_saved & file_name %in% dir()){
load(file_name)
} else {
# Format station name
station <- toupper(gsub(" ", "%20", station))
# Adjust dates
start_date <- as.Date(paste0(start_year, '-01-01'))
end_date <- as.Date(paste0(end_year, '-12-31'))
if(end_date > Sys.Date()){
end_date <- Sys.Date() - 1
}
# Parse date components
start_day <- as.numeric(format(start_date, "%d"))
start_month <- as.numeric(format(start_date, "%m"))
start_year <- as.numeric(format(start_date, "%Y"))
end_day <- as.numeric(format(end_date, "%d"))
end_month <- as.numeric(format(end_date, "%m"))
end_year <- as.numeric(format(end_date, "%Y"))
# Get years
years <- start_year:end_year
# For each year, get the data and store in list
results_list <- list()
for (i in 1:length(years)){
try({
this_year <- years[i]
this_start_month <- 1
this_start_day <- 1
if(this_year == end_year){
this_end_month <- as.numeric(format(end_date, '%m'))
this_end_day <- as.numeric(format(end_date, '%m'))
} else {
this_end_month <- 12
this_end_day <- 31
}
# Define link format for airports
link <- paste0("http://www.wunderground.com/history/airport/",
station,
"/", this_year,
"/", this_start_month,
"/", this_start_day,
"/CustomHistory.html?dayend=", this_end_day,
"&monthend=", this_end_month,
"&yearend=", this_year,
"&req_city=NA&req_state=NA&req_statename=NA&format=1")
# # Read in data from link
df <- suppressWarnings(fread(link))
names_df <- names(df)
df <- data.frame(df)
names(df) <- names_df
# Keep only the first 20 columns (through cloud cover)
df <- df[,1:21]
# Fix date
names(df)[1] <- 'date'
df$date <- as.Date(df$date, format = '%Y-%m-%d')
# Fix other names
names(df) <-
tolower(gsub(' |[/]', '_', names(df)))
# Keep only certain columns
df <- df[,!grepl('sea_level|visibility|wind|gust|dew', names(df))]
# # Standardize names
names(df) <- c("date",
"temp_max",
"temp_mean",
"temp_min",
"humidity_max",
"humidity_mean",
"humidity_min",
"precipitation",
"cloud_cover")
#
# Add a location column
df$location <- toupper(as.character(station))
# print url source
message(paste0('Data retrieved for ', this_year))
# Stick results into list
results_list[[i]] <- df
})
}
# Bind together results
x <- do.call('rbind', results_list)
}
# Save if applicable
if(save){
save(x, file = file_name)
}
return(x)
} |
# Bayesian epiallele detection
# Copyright (C) 2019 James E. Barrett (regmjeb@ucl.ac.uk)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
data_assembly <- function(sam_data){
  # Build one binary methylation matrix per genomic partition from parsed
  # SAM fields.
  #
  # Flags 99 and 147 mean that the genome coordinate of a CpG aligns with
  # the C (forward reads). Flags 83 and 163 mean that the coordinate aligns
  # with the G (reverse reads). Therefore reverse reads have 1 bp subtracted
  # from start, end, and coords; forward and reverse reads are then pooled.
  # NOTE(review): only flag1 == "83" is shifted below; "163" is never tested
  # even though the comment lists it -- confirm whether flag2 needs checking.
  #
  # sam_data components (parallel vectors):
  #   [[1]] name, [[2]] flag1, [[3]] flag2, [[4]] start, [[5]] end,
  #   [[6]] z (':'-separated unmethylated CpG coords),
  #   [[7]] Z (':'-separated methylated CpG coords)
  #
  # Returns: a list with one matrix per partition; rows = reads, columns =
  # sorted CpG loci; 1 = methylated, 0 = unmethylated, NA = not covered.

  # source files (seems to be necessary for parallelisation)
  for (src in dir('R_files')){
    source(paste('R_files/', src, sep=''))
  }

  # ----------------------------------------------------------- #
  # Extract relevant strand
  #-------------------------------------------------------------#
  data <- sam_data
  data[[4]] <- as.numeric(data[[4]])
  data[[5]] <- as.numeric(data[[5]])

  # Remove reads whose start or end is NA.
  # BUG FIX 1: the original tested data[[5]] twice, so NA starts were never
  # removed.
  # BUG FIX 2: when no NAs were present, x[-integer(0)] silently emptied
  # every column; only subset when something actually has to be dropped.
  na <- which(is.na(data[[4]]) | is.na(data[[5]]))
  if (length(na) > 0){
    for (mu in 1:7){ data[[mu]] <- data[[mu]][-na] }
  }
  names(data) <- c('name','flag1','flag2','start','end','z','Z')

  # Subtract 1 from reverse reads so coordinates align with the C
  rev.idx <- which(data$flag1 == "83")
  data$start[rev.idx] <- data$start[rev.idx] - 1
  data$end[rev.idx] <- data$end[rev.idx] - 1

  # ----------------------------------------------------------- #
  # Generate partition maps
  #-------------------------------------------------------------#
  # Sort all reads by start position so chr_partition() sees ordered
  # intervals, then label each read with its partition number.
  start <- data$start
  end <- data$end
  ord <- order(start)
  start <- start[ord]
  end <- end[ord]
  for (mu in 1:7){ data[[mu]] <- data[[mu]][ord] }
  partition <- chr_partition(start, end)  # defined in R_files/

  # ----------------------------------------------------------- #
  # Assemble datasets
  #-------------------------------------------------------------#
  # No of partitions (chr_partition labels reads 1..P in order)
  P <- partition[length(partition)]
  data.Y <- vector('list', P)
  cat('Assembling datasets...\n')
  pB <- txtProgressBar(min=1, max=P, width=50L, style = 3)
  for (p in seq_len(P)){
    setTxtProgressBar(pB, p)
    index <- which(partition == p)

    # First pass: collect every CpG locus observed in this partition,
    # shifting reverse-read coordinates by -1 as above.
    on.index <- NULL
    off.index <- NULL
    for (mu in seq_along(index)){
      split.on <- as.numeric(strsplit(data$Z[index[mu]], split=':')[[1]])
      split.off <- as.numeric(strsplit(data$z[index[mu]], split=':')[[1]])
      if (data$flag1[index[mu]] == "83"){
        split.on <- split.on - 1
        split.off <- split.off - 1
      }
      on.index <- union(on.index, split.on)
      off.index <- union(off.index, split.off)
    }
    on.index <- on.index[!is.na(on.index)]
    off.index <- off.index[!is.na(off.index)]
    CpG.loci <- union(on.index, off.index)
    CpG.loci <- CpG.loci[order(CpG.loci)]

    # Second pass: fill the read-by-locus matrix (NA = locus not observed)
    Y <- matrix(NA, length(index), length(CpG.loci))
    colnames(Y) <- CpG.loci
    for (mu in seq_along(index)){
      split.on <- as.numeric(strsplit(data$Z[index[mu]], split=':')[[1]])
      split.off <- as.numeric(strsplit(data$z[index[mu]], split=':')[[1]])
      if (data$flag1[index[mu]] == "83"){
        split.on <- split.on - 1
        split.off <- split.off - 1
      }
      Y[mu, match(split.on, CpG.loci)] <- 1
      Y[mu, match(split.off, CpG.loci)] <- 0
    }
    data.Y[[p]] <- Y
  }
  close(pB)
  return(data.Y)
} | /R_files/data_assembly.R | permissive | mferrodo/bed-beta | R | false | false | 4,120 | r | Bayesian epiallele detection
# Copyright (C) 2019 James E. Barrett (regmjeb@ucl.ac.uk)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
data_assembly <- function(sam_data){
  # Build one binary methylation matrix per genomic partition from parsed
  # SAM fields.
  #
  # Flags 99 and 147 mean that the genome coordinate of a CpG aligns with
  # the C (forward reads). Flags 83 and 163 mean that the coordinate aligns
  # with the G (reverse reads). Therefore reverse reads have 1 bp subtracted
  # from start, end, and coords; forward and reverse reads are then pooled.
  # NOTE(review): only flag1 == "83" is shifted below; "163" is never tested
  # even though the comment lists it -- confirm whether flag2 needs checking.
  #
  # sam_data components (parallel vectors):
  #   [[1]] name, [[2]] flag1, [[3]] flag2, [[4]] start, [[5]] end,
  #   [[6]] z (':'-separated unmethylated CpG coords),
  #   [[7]] Z (':'-separated methylated CpG coords)
  #
  # Returns: a list with one matrix per partition; rows = reads, columns =
  # sorted CpG loci; 1 = methylated, 0 = unmethylated, NA = not covered.

  # source files (seems to be necessary for parallelisation)
  for (src in dir('R_files')){
    source(paste('R_files/', src, sep=''))
  }

  # ----------------------------------------------------------- #
  # Extract relevant strand
  #-------------------------------------------------------------#
  data <- sam_data
  data[[4]] <- as.numeric(data[[4]])
  data[[5]] <- as.numeric(data[[5]])

  # Remove reads whose start or end is NA.
  # BUG FIX 1: the original tested data[[5]] twice, so NA starts were never
  # removed.
  # BUG FIX 2: when no NAs were present, x[-integer(0)] silently emptied
  # every column; only subset when something actually has to be dropped.
  na <- which(is.na(data[[4]]) | is.na(data[[5]]))
  if (length(na) > 0){
    for (mu in 1:7){ data[[mu]] <- data[[mu]][-na] }
  }
  names(data) <- c('name','flag1','flag2','start','end','z','Z')

  # Subtract 1 from reverse reads so coordinates align with the C
  rev.idx <- which(data$flag1 == "83")
  data$start[rev.idx] <- data$start[rev.idx] - 1
  data$end[rev.idx] <- data$end[rev.idx] - 1

  # ----------------------------------------------------------- #
  # Generate partition maps
  #-------------------------------------------------------------#
  # Sort all reads by start position so chr_partition() sees ordered
  # intervals, then label each read with its partition number.
  start <- data$start
  end <- data$end
  ord <- order(start)
  start <- start[ord]
  end <- end[ord]
  for (mu in 1:7){ data[[mu]] <- data[[mu]][ord] }
  partition <- chr_partition(start, end)  # defined in R_files/

  # ----------------------------------------------------------- #
  # Assemble datasets
  #-------------------------------------------------------------#
  # No of partitions (chr_partition labels reads 1..P in order)
  P <- partition[length(partition)]
  data.Y <- vector('list', P)
  cat('Assembling datasets...\n')
  pB <- txtProgressBar(min=1, max=P, width=50L, style = 3)
  for (p in seq_len(P)){
    setTxtProgressBar(pB, p)
    index <- which(partition == p)

    # First pass: collect every CpG locus observed in this partition,
    # shifting reverse-read coordinates by -1 as above.
    on.index <- NULL
    off.index <- NULL
    for (mu in seq_along(index)){
      split.on <- as.numeric(strsplit(data$Z[index[mu]], split=':')[[1]])
      split.off <- as.numeric(strsplit(data$z[index[mu]], split=':')[[1]])
      if (data$flag1[index[mu]] == "83"){
        split.on <- split.on - 1
        split.off <- split.off - 1
      }
      on.index <- union(on.index, split.on)
      off.index <- union(off.index, split.off)
    }
    on.index <- on.index[!is.na(on.index)]
    off.index <- off.index[!is.na(off.index)]
    CpG.loci <- union(on.index, off.index)
    CpG.loci <- CpG.loci[order(CpG.loci)]

    # Second pass: fill the read-by-locus matrix (NA = locus not observed)
    Y <- matrix(NA, length(index), length(CpG.loci))
    colnames(Y) <- CpG.loci
    for (mu in seq_along(index)){
      split.on <- as.numeric(strsplit(data$Z[index[mu]], split=':')[[1]])
      split.off <- as.numeric(strsplit(data$z[index[mu]], split=':')[[1]])
      if (data$flag1[index[mu]] == "83"){
        split.on <- split.on - 1
        split.off <- split.off - 1
      }
      Y[mu, match(split.on, CpG.loci)] <- 1
      Y[mu, match(split.off, CpG.loci)] <- 0
    }
    data.Y[[p]] <- Y
  }
  close(pB)
  return(data.Y)
} |
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 25303
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 25303
c
c Input Parameter (command line, file):
c input filename QBFLIB/Ayari/Adder/Adder2-16-s.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 22329
c no.of clauses 25303
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 25303
c
c QBFLIB/Ayari/Adder/Adder2-16-s.qdimacs 22329 25303 E1 [] 0 888 19039 25303 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Ayari/Adder/Adder2-16-s/Adder2-16-s.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 617 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 25303
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 25303
c
c Input Parameter (command line, file):
c input filename QBFLIB/Ayari/Adder/Adder2-16-s.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 22329
c no.of clauses 25303
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 25303
c
c QBFLIB/Ayari/Adder/Adder2-16-s.qdimacs 22329 25303 E1 [] 0 888 19039 25303 NONE
|
# This program contains code as well as responses to Linear Models
# Lab 4.
#
# @author Sarah Stepak
# @program Stepak LM4.R
# @since 04.08.2021
# @dataset Credit version 2.csv
# Import statements
library(ggplot2)
library(GGally)
# NOTE(review): CreditV2 is assumed to already be in the workspace (e.g.
# imported via RStudio's dataset import) -- confirm, or replace with
# an explicit read.csv() call.
credit <- CreditV2 #imports the dataset
#1. Use Income as the response and Age as the explanatory look construct a
# linear models and look for outliers and influential points.
#
credit.lm <- lm(Income~Age, data=credit)
plot(credit.lm, which=c(5)) # Residuals vs Leverage diagnostic plot
#2. Construct a correlation matrix of all the numeric variables rounded
# to two decimal places. (removes col's 6-10)
#
# round(cor(credit[,-c(6:10)]),2) # Basic, Used if we do not have any NAs
round(cor(credit[,-c(6:10)],use="pairwise.complete.obs"),2) # Standard
#3. Graph the correlation matrix from the previous question.
#
credit.cor <- round(cor(credit[,-c(6:10)],use="pairwise.complete.obs"),2)
# BUG FIX: ggcorr(credit.cor) treated the correlation matrix as raw data
# and correlated it a second time; pass it via `cor_matrix` instead.
ggcorr(data = NULL, cor_matrix = credit.cor)
#4. Repeat 3 after ordering the rows and columns and columns.
#
names(credit) #Prints col names
ord <- order(credit.cor[1,], decreasing = TRUE) # Orders the first row
ord # View the order
temp <- credit.cor[ord,ord] # Saves order to temp
temp #debug
# BUG FIX: same cor_matrix issue as above
ggcorr(data = NULL, cor_matrix = temp)
rm(temp)
#5. Use ggpairs() with just the numeric variables.
ggpairs(subset(credit, select=c(Income, Rating, Limit, Cards, Age,
Education, Balance)))
#6. Pick 5 or 6 variables with a mix of numeric and categorical variables and
# make a ggpairs() plot.
ggpairs(subset(credit,select=c(Income, Rating, Limit, Cards, Age, Gender)),
ggplot2::aes(color=Gender))
#7. Use the variables you didn't use in 6 to make
# another ggpairs() plot.
ggpairs(subset(credit,select=c(Education, Balance, Student, Married, County)),
ggplot2::aes(color=County)) | /Labs/Linear Models/Stepak LM4.R | no_license | dusteenie/MAT-328 | R | false | false | 1,788 | r | # This program contains code as well as responses to Linear Models
# Lab 4.
#
# @author Sarah Stepak
# @program Stepak LM4.R
# @since 04.08.2021
# @dataset Credit version 2.csv
# Import statements
library(ggplot2)
library(GGally)
# NOTE(review): CreditV2 is assumed to already be in the workspace (e.g.
# imported via RStudio's dataset import) -- confirm, or replace with
# an explicit read.csv() call.
credit <- CreditV2 #imports the dataset
#1. Use Income as the response and Age as the explanatory look construct a
# linear models and look for outliers and influential points.
#
credit.lm <- lm(Income~Age, data=credit)
plot(credit.lm, which=c(5)) # Residuals vs Leverage diagnostic plot
#2. Construct a correlation matrix of all the numeric variables rounded
# to two decimal places. (removes col's 6-10)
#
# round(cor(credit[,-c(6:10)]),2) # Basic, Used if we do not have any NAs
round(cor(credit[,-c(6:10)],use="pairwise.complete.obs"),2) # Standard
#3. Graph the correlation matrix from the previous question.
#
credit.cor <- round(cor(credit[,-c(6:10)],use="pairwise.complete.obs"),2)
# BUG FIX: ggcorr(credit.cor) treated the correlation matrix as raw data
# and correlated it a second time; pass it via `cor_matrix` instead.
ggcorr(data = NULL, cor_matrix = credit.cor)
#4. Repeat 3 after ordering the rows and columns and columns.
#
names(credit) #Prints col names
ord <- order(credit.cor[1,], decreasing = TRUE) # Orders the first row
ord # View the order
temp <- credit.cor[ord,ord] # Saves order to temp
temp #debug
# BUG FIX: same cor_matrix issue as above
ggcorr(data = NULL, cor_matrix = temp)
rm(temp)
#5. Use ggpairs() with just the numeric variables.
ggpairs(subset(credit, select=c(Income, Rating, Limit, Cards, Age,
Education, Balance)))
#6. Pick 5 or 6 variables with a mix of numeric and categorical variables and
# make a ggpairs() plot.
ggpairs(subset(credit,select=c(Income, Rating, Limit, Cards, Age, Gender)),
ggplot2::aes(color=Gender))
#7. Use the variables you didn't use in 6 to make
# another ggpairs() plot.
ggpairs(subset(credit,select=c(Education, Balance, Student, Married, County)),
ggplot2::aes(color=County)) |
#Clear Yer Stuff
rm(list=ls())

# MERGE-CONFLICT FIX: this script contained unresolved git conflict markers
# (<<<<<<< HEAD / ======= / >>>>>>> origin/master), which are a syntax error
# in R. Both conflicts had an empty HEAD side, so the origin/master content
# was kept and the markers removed.

#load data
cdata<- read.csv("~/GitHub/Dal-Intertidal-2014/Intertidal_Master_Data_Sheet_2014.csv")

#Load Data
TotalData<- read.csv("~/GitHub/Dal-Intertidal-2014/Intertidal_Master_Data_Sheet_2014.csv")

######### Make new data set with only SST and abundances#########
# SST values recorded at high tide (printed for inspection only)
unique(TotalData$SST[TotalData$Strata=="H"])
#Bind SST and abundance in data table
SSTTable <- cbind(TotalData$SST, TotalData$pc_Leatherista_difformus)
#Makes SSTTable a new data frame
SSTTable <- as.data.frame(SSTTable)
# change NAs to 0s
SSTTable$V2[is.na(SSTTable$V2)]=0
#Change Column Names
colnames(SSTTable) <- c("SST", "Percent Coverage")

## Summarise percent coverage per SST value (mean, sd, n)
require(plyr)
TotalData[is.na(TotalData)]=0
FigureTable<- ddply(TotalData,.(SST),summarize,
                    MeanPercentCoverage=mean(pc_Leatherista_difformus,na.rm = TRUE),
                    SDPercentCoverage=sd(pc_Leatherista_difformus,na.rm = TRUE),
                    NPercentCoverage=sum(is.na(pc_Leatherista_difformus)==F)
)

###### Make Graph ########
jpeg('SSTPercent Coverage1.jpeg', height=1200, width=2400, res=400, qual=100 )
barplot(FigureTable$MeanPercentCoverage, names.arg=FigureTable$SST, xlab="SST", ylab= expression ("Percent Coverage (%)"), main=" ")
dev.off()
getwd()

#### Error Bars ###
# Scale proportions to percentages; SE = sd / sqrt(n)
AB_mean <- FigureTable$MeanPercentCoverage*100
AB_se <- FigureTable$SDPercentCoverage/sqrt(FigureTable$NPercentCoverage)*100
jpeg('SSTPercent Coverage1.jpg', height=1200, width=2400, res=400, qual=100 )
mp <- barplot(AB_mean, names.arg=FigureTable$SST, xlab="SST", ylab= expression ("Percent coverage (%)"), main=" ",ylim=c(0,10)) # plots the barplot and saves the midpoints in mp
segments(mp, AB_mean + AB_se, mp,AB_mean, lwd=2) # plots positive error bar centered on mp
segments(mp - 0.1, AB_mean + AB_se, mp + 0.1, AB_mean + AB_se, lwd=2) #plots error bar caps
dev.off()
getwd()

##################### site map #########################
install.packages("marmap")
install.packages("maps")
install.packages("mapdata")
require(maps)
require(mapdata)
# set lat/long limits
xlim=c(-67,-62)
ylim=c(43,46)
#Lat and Long in a table
location_data <- matrix(c(44.5001, -63.9250, 44.6332, -63.5987, 44.6148, -65.6774, 45.1596, -64.3581, 43.948126, -64.820485),ncol=2,byrow=TRUE)
colnames(location_data) <- c("Latitude", "Longitude")
rownames(location_data) <- c("Cranberry Cove", "South Street", "Bear River", "Wolfville", "Summerville")
location_data <- as.data.frame.matrix(location_data)
# plot basic map
map("worldHires", xlim=xlim, ylim=ylim, col="gray90", fill=TRUE, resolution=0) # make base map
map.axes() # add axes
map.scale(relwidth=0.5) # add scale bar proportioned to 50% width
points(location_data$Longitude,location_data$Latitude,pch=16,cex=1.5)
locations <- c("Cranberry Cove", "South Street", "Bear River", "Wolfville", "Summerville")
text(location_data$Longitude,location_data$Latitude,locations,col="black",cex=0.7,pos=2)
title("Sampling Stations")

#####################Diversity Map##################
#import intertidal master data sheet (local path; overrides the earlier load)
cdata <- read.csv("~/Documents/Dalhousie/Intertidal Ecology/Dal-Intertidal-2014/Intertidal_Master_Data_Sheet_2014.csv")
# abundance ("ab*") and percent-cover ("pc*") columns: presence/absence per site
species <- cdata[,substr(names(cdata),1,2)=="ab"|substr(names(cdata),1,2)=="pc"]
test <- as.data.frame(matrix(NA,nrow=length(unique(cdata$Site_name)),ncol=length(names(species))))
names(test) <- names(species)
row.names(test) <- unique(cdata$Site_name)
for(site in unique(cdata$Site_name)){
  for(sp in names(species)){
    print(site)
    print(sp)
    test[unique(cdata$Site_name)==site,names(species)==sp] <- sum(is.na(species[cdata$Site_name==site,names(species)==sp])==F)>=1
  }
}
#next, calculate diversity
#using abundance (if it starts with ab) sum up each column
#install vegan to calculate diversity
install.packages("vegan")
require(vegan)
#create a dataframe that only had abundances
species <- cdata[,substr(names(cdata),1,2)=="ab"]
#made all nas = 0
species[is.na(species)] <- 0
#runs diversity, puts into matrix
diversitydata <- diversity(species, "shannon")
#matrix into dataframe
ddiversity <- as.data.frame(diversitydata)
#adds columns from cdata
ddiversity$site <- cdata$Site_name
ddiversity$strata <- cdata$Strata
ddiversity$Longitude <- cdata$Longitude
ddiversity$Latitude <- cdata$Latitude
#map with diversity
install.packages("marmap")
install.packages("maps")
install.packages("mapdata")
require(maps)
require(mapdata)
# set lat/long limits
xlim=c(-67,-62)
ylim=c(43,46)
map("worldHires", xlim=xlim, ylim=ylim, col="gray90", fill=TRUE, resolution=0) # make base map
map.axes() # add axes
map.scale(relwidth=0.5)
locations <- c("Cranberry Cove", "Eastern Passage", "Bear River", "Wolfville", "Summerville")
text(ddiversity$Longitude,ddiversity$Latitude,ddiversity$site,col="black",cex=0.7,pos=2)
title("Sampling Stations by Diversity")
# Symbol size scales with log-diversity (was 1:length(); seq_along is safe
# for zero-length input)
for(site in seq_along(ddiversity$diversitydata)){
  size=log10(ddiversity$diversitydata[site]+1)*10 # calculate symbol size
  points(ddiversity$Longitude[site],ddiversity$Latitude[site],pch=16,cex=size) # add site locations
}
#calculate richness (number of taxa present per site)
SpeciesRichness <- rowSums(test)
sp_richness <- as.data.frame(SpeciesRichness)
sp_richness$sites=row.names(sp_richness)
#add latitude and longitude
unique(cdata$Site_name)
unique(sp_richness$sites)
unique(cdata$Site_name)==unique(sp_richness$sites)
# make new dataframe by merging the species and site data by location/site_name
sp_richness_data <- merge(cdata,sp_richness, by.x="Site_name",by.y="sites")
#map
map("worldHires", xlim=xlim, ylim=ylim, col="gray90", fill=TRUE, resolution=0) # make base map
map.axes() # add axes
map.scale(relwidth=0.5)
text(sp_richness_data$Longitude,sp_richness_data$Latitude,sp_richness_data$Site_name,col="black",cex=0.7,pos=2)
title("Sampling Stations by Species Richness")
for(site in seq_along(sp_richness_data$SpeciesRichness)){
  size=log10(sp_richness_data$SpeciesRichness[site]+1)*3 # calculate symbol size
  points(sp_richness_data$Longitude[site],sp_richness_data$Latitude[site],pch=16,cex=size) # add site locations
}
| /Team B Code/SST vs. Abundance/pc_Leatherista_difformus.R | permissive | ohara-patrick/Dal-Intertidal-2014 | R | false | false | 6,656 | r | #Clear Yer Stuff
rm(list=ls())
<<<<<<< HEAD
=======
#load data
cdata<- read.csv("~/GitHub/Dal-Intertidal-2014/Intertidal_Master_Data_Sheet_2014.csv")
>>>>>>> origin/master
#Load Data
TotalData<- read.csv("~/GitHub/Dal-Intertidal-2014/Intertidal_Master_Data_Sheet_2014.csv")
######### Make new data set with only SST and abundances#########
#Gives SST at high tide
unique(TotalData$SST[TotalData$Strata=="H"])
#Bind SST and abundance in data table
SSTTable <- cbind(TotalData$SST, TotalData$pc_Leatherista_difformus)
#Makes SSTTable a new data frame
SSTTable <- as.data.frame(SSTTable)
# change NAs to 0s
SSTTable$V2[is.na(SSTTable$V2)]=0
#Change Column Names
colnames(SSTTable) <- c("SST", "Percent Coverage")
##
require(plyr)
TotalData[is.na(TotalData)]=0
FigureTable<- ddply(TotalData,.(SST),summarize,
MeanPercentCoverage=mean(pc_Leatherista_difformus,na.rm = TRUE),
SDPercentCoverage=sd(pc_Leatherista_difformus,na.rm = TRUE),
NPercentCoverage=sum(is.na(pc_Leatherista_difformus)==F)
)
###### Make Graph ########
jpeg('SSTPercent Coverage1.jpeg', height=1200, width=2400, res=400, qual=100 )
barplot(FigureTable$MeanPercentCoverage, names.arg=FigureTable$SST, xlab="SST", ylab= expression ("Percent Coverage (%)"), main=" ")
dev.off()
getwd()
#### Error Bars ###
AB_mean <- FigureTable$MeanPercentCoverage*100
AB_se <- FigureTable$SDPercentCoverage/sqrt(FigureTable$NPercentCoverage)*100
jpeg('SSTPercent Coverage1.jpg', height=1200, width=2400, res=400, qual=100 )
mp <- barplot(AB_mean, names.arg=FigureTable$SST, xlab="SST", ylab= expression ("Percent coverage (%)"), main=" ",ylim=c(0,10)) # plots the barplot and saves the midpoints in mp
segments(mp, AB_mean + AB_se, mp,AB_mean, lwd=2) # plots positive error bar centered on mp
segments(mp - 0.1, AB_mean + AB_se, mp + 0.1, AB_mean + AB_se, lwd=2) #plots error bar caps
dev.off()
getwd()
# NOTE(review): this region contained unresolved git merge-conflict markers
# ("<<<<<<< HEAD" / "=======" / ">>>>>>> origin/master").  The HEAD side was
# empty, so the origin/master content is kept and the markers removed -- the
# markers would otherwise make the file unparseable by R.

##################### site map #########################
# One-time package installation (comment out after the first run)
install.packages("marmap")
install.packages("maps")
install.packages("mapdata")
require(maps)
require(mapdata)
# set lat/long limits for the map window
xlim=c(-67,-62)
ylim=c(43,46)
# Lat and Long in a table (one row per sampling station)
location_data <- matrix(c(44.5001, -63.9250, 44.6332, -63.5987, 44.6148, -65.6774, 45.1596, -64.3581, 43.948126, -64.820485),ncol=2,byrow=TRUE)
colnames(location_data) <- c("Latitude", "Longitude")
rownames(location_data) <- c("Cranberry Cove", "South Street", "Bear River", "Wolfville", "Summerville")
location_data <- as.data.frame.matrix(location_data)
# plot basic map
map("worldHires", xlim=xlim, ylim=ylim, col="gray90", fill=TRUE, resolution=0) # make base map
map.axes() # add axes
map.scale(relwidth=0.5) # add scale bar proportioned to 50% width
points(location_data$Longitude,location_data$Latitude,pch=16,cex=1.5)
locations <- c("Cranberry Cove", "South Street", "Bear River", "Wolfville", "Summerville")
text(location_data$Longitude,location_data$Latitude,locations,col="black",cex=0.7,pos=2)
title("Sampling Stations")

##################### Diversity Map ##################
# import intertidal master data sheet
cdata <- read.csv("~/Documents/Dalhousie/Intertidal Ecology/Dal-Intertidal-2014/Intertidal_Master_Data_Sheet_2014.csv")
# species columns are prefixed "ab" (abundance) or "pc" (percent cover)
species <- cdata[,substr(names(cdata),1,2)=="ab"|substr(names(cdata),1,2)=="pc"]
# presence/absence table: one row per site, one column per species
test <- as.data.frame(matrix(NA,nrow=length(unique(cdata$Site_name)),ncol=length(names(species))))
names(test) <- names(species)
row.names(test) <- unique(cdata$Site_name)
for(site in unique(cdata$Site_name)){
  for(sp in names(species)){
    print(site)  # progress output (noisy; remove once verified)
    print(sp)
    # TRUE when the species has at least one non-NA record at the site
    # (was: sum(is.na(...) == F) >= 1 -- any(!is.na(...)) is equivalent)
    test[unique(cdata$Site_name)==site,names(species)==sp] <- any(!is.na(species[cdata$Site_name==site,names(species)==sp]))
  }
}
# next, calculate diversity from the abundance ("ab") columns only
install.packages("vegan")
require(vegan)
species <- cdata[,substr(names(cdata),1,2)=="ab"]
# treat missing counts as zero abundance
species[is.na(species)] <- 0
# Shannon diversity index per row (quadrat)
diversitydata <- diversity(species, "shannon")
ddiversity <- as.data.frame(diversitydata)
# carry over site/strata/coordinates from cdata (row order is preserved)
ddiversity$site <- cdata$Site_name
ddiversity$strata <- cdata$Strata
ddiversity$Longitude <- cdata$Longitude
ddiversity$Latitude <- cdata$Latitude
# map with diversity
install.packages("marmap")
install.packages("maps")
install.packages("mapdata")
require(maps)
require(mapdata)
# set lat/long limits
xlim=c(-67,-62)
ylim=c(43,46)
map("worldHires", xlim=xlim, ylim=ylim, col="gray90", fill=TRUE, resolution=0) # make base map
map.axes() # add axes
map.scale(relwidth=0.5)
# NOTE(review): 'locations' below is defined but unused; labels come from ddiversity$site
locations <- c("Cranberry Cove", "Eastern Passage", "Bear River", "Wolfville", "Summerville")
text(ddiversity$Longitude,ddiversity$Latitude,ddiversity$site,col="black",cex=0.7,pos=2)
title("Sampling Stations by Diversity")
# symbol size scales with log10(diversity + 1)
for(site in seq_along(ddiversity$diversitydata)){
  size=log10(ddiversity$diversitydata[site]+1)*10 # calculate symbol size
  points(ddiversity$Longitude[site],ddiversity$Latitude[site],pch=16,cex=size) # add site locations
}
# species richness = row sums of the presence/absence table
SpeciesRichness <- rowSums(test)
sp_richness <- as.data.frame(SpeciesRichness)
sp_richness$sites=row.names(sp_richness)
# sanity checks: site names should line up between cdata and sp_richness
unique(cdata$Site_name)
unique(sp_richness$sites)
unique(cdata$Site_name)==unique(sp_richness$sites)
# make new dataframe by merging the species and site data by location/site_name
sp_richness_data <- merge(cdata,sp_richness, by.x="Site_name",by.y="sites")
# map of richness, symbol size scaled by log10(richness + 1)
map("worldHires", xlim=xlim, ylim=ylim, col="gray90", fill=TRUE, resolution=0) # make base map
map.axes() # add axes
map.scale(relwidth=0.5)
text(sp_richness_data$Longitude,sp_richness_data$Latitude,sp_richness_data$Site_name,col="black",cex=0.7,pos=2)
title("Sampling Stations by Species Richness")
for(site in seq_along(sp_richness_data$SpeciesRichness)){
  size=log10(sp_richness_data$SpeciesRichness[site]+1)*3 # calculate symbol size
  points(sp_richness_data$Longitude[site],sp_richness_data$Latitude[site],pch=16,cex=size) # add site locations
}
|
# run_analysis.R
# 1 Merges the training and the test sets to create one data set.
# 2 Extracts only the measurements on the mean and standard deviation for each measurement.
# 3 Uses descriptive activity names to name the activities in the data set
# 4 Appropriately labels the data set with descriptive variable names.
# 5 From the data set in step 4, creates a second, independent tidy data set
#   with the average of each variable for each activity and each subject.

# load libraries
library(dplyr)
library(reshape2)
library(tidyr)

# set working directory (assumes the "UCI HAR Dataset" folder lives underneath it)
setwd("./Github/datasciencecoursera/project/")

# Load datasets
testx <- read.table("./UCI HAR Dataset/test/X_test.txt")
testy <- read.table("./UCI HAR Dataset/test/y_test.txt")
testsub <- read.table("./UCI HAR Dataset/test/subject_test.txt")
trainx <- read.table("./UCI HAR Dataset/train/X_train.txt")
trainy <- read.table("./UCI HAR Dataset/train/y_train.txt")
trainsub <- read.table("./UCI HAR Dataset/train/subject_train.txt")
features <- read.table("./UCI HAR Dataset/features.txt")
actnam <- read.table("./UCI HAR Dataset/activity_labels.txt")
# append a short descriptive label for each of the six activity codes
actnam <- cbind(actnam, c("walk", "walkup", "walkdown", "sit", "stand", "lay"))

# Rename variables
names(actnam) <- c("id", "long", "activity")
names(testx) <- features[[2]]
names(trainx) <- features[[2]]
names(testy) <- "activity"
names(trainy) <- "activity"
names(trainsub) <- "subject"
names(testsub) <- "subject"

# Combining the columns: activity, subject, then the feature columns
testx <- cbind(testy, testsub, testx)
trainx <- cbind(trainy, trainsub, trainx)

# keep only mean/std features; the +2 shifts feature indices past the
# activity and subject columns prepended above
ka1 <- filter(features, grepl("mean", features$V2) | grepl("std", features$V2))
vars <- rbind(1,2,ka1[1]+2)
train <- trainx[vars[,1]]
test <- testx[vars[,1]]

# Combining the datasets
data <- rbind(test, train)

# renaming the activities: exact lookup of each numeric code against actnam$id.
# (The original chained sub() calls performed regex substring replacement one
# digit at a time, which is fragile; match() maps codes 1..6 exactly in one
# step and yields NA for any malformed code instead of passing it through.)
data$activity <- as.character(actnam$activity[match(data$activity, actnam$id)])

# renaming the variables: lower-case and strip hyphens
names(data) <- tolower(names(data))
names(data) <- gsub("-", "", names(data))

# calculating the means for subject and activity pairs.
# dcast() needs an aggregation function because subject/activity pairs repeat.
data <- data.frame(data)
new_data <- melt(data, id.vars = c(names(data[1:2])), measure.vars = c(names(data[3:81])))
new_data <- dcast(new_data, subject + activity ~ variable, fun.aggregate = mean)

# write table
write.table(new_data, file = "tidy.txt", row.names = FALSE)
| /project/run_analysis.R | no_license | tjautio/datasciencecoursera | R | false | false | 2,804 | r | #You should create one R script called run_analysis.R that does the following.
# 1 Merges the training and the test sets to create one data set.
# 2 Extracts only the measurements on the mean and standard deviation for each measurement.
# 3 Uses descriptive activity names to name the activities in the data set
# 4 Appropriately labels the data set with descriptive variable names.
# 5 From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# load libraries
library(dplyr)
library(reshape2)
library(tidyr)
# set working directory
# NOTE(review): setwd() inside a script is fragile -- assumes a fixed folder layout
setwd("./Github/datasciencecoursera/project/")
# Load datasets
testx <- read.table("./UCI HAR Dataset/test/X_test.txt")
testy <- read.table("./UCI HAR Dataset/test/y_test.txt")
testsub <- read.table("./UCI HAR Dataset/test/subject_test.txt")
trainx <- read.table("./UCI HAR Dataset/train/X_train.txt")
trainy <- read.table("./UCI HAR Dataset/train/y_train.txt")
trainsub <- read.table("./UCI HAR Dataset/train/subject_train.txt")
features <- read.table("./UCI HAR Dataset/features.txt")
actnam <- read.table("./UCI HAR Dataset/activity_labels.txt")
# append a short descriptive label for each of the six activity codes
actnam <- cbind(actnam, c("walk", "walkup", "walkdown", "sit", "stand", "lay"))
# Rename variables
names(actnam) <- c("id", "long", "activity")
names(testx) <- features[[2]]
names(trainx) <- features[[2]]
names(testy) <- "activity"
names(trainy) <- "activity"
names(trainsub) <- "subject"
names(testsub) <- "subject"
# Combining the columns: activity, subject, then the feature columns
testx <- cbind(testy, testsub, testx)
trainx <- cbind(trainy, trainsub, trainx)
# keep only mean/std features; +2 shifts indices past activity/subject columns
ka1 <- filter(features, grepl("mean", features$V2) | grepl("std", features$V2))
vars <- rbind(1,2,ka1[1]+2)
train <- trainx[vars[,1]]
test <- testx[vars[,1]]
# Combining the datasets
data <- rbind(test, train)
# renaming the activities (codes 1..6 -> descriptive labels)
# NOTE(review): sub() does regex substring replacement; safe here only because
# the activity codes are the single digits 1..6
data$activity <- sub(1, actnam[1,3],data$activity)
data$activity <- sub(2, actnam[2,3],data$activity)
data$activity <- sub(3, actnam[3,3],data$activity)
data$activity <- sub(4, actnam[4,3],data$activity)
data$activity <- sub(5, actnam[5,3],data$activity)
data$activity <- sub(6, actnam[6,3],data$activity)
# renaming the variables: lower-case and strip hyphens
names(data) <- tolower(names(data))
names(data) <- gsub("-", "", names(data))
# calculating the means for subject and activity pairs.
# dcast() requires an aggregation method because subject/activity pairs repeat;
# fun.aggregate = mean collapses each pair to its per-variable average.
data <- data.frame(data)
new_data <- melt(data, id.vars = c(names(data[1:2])), measure.vars = c(names(data[3:81])))
new_data <- dcast(new_data, subject + activity ~ variable, fun.aggregate = mean)
# write table
write.table(new_data, file = "tidy.txt", row.names = FALSE)
|
# Extracted example script for meta::settings.meta -- demonstrates printing
# and changing the package-wide defaults used when conducting, printing and
# plotting meta-analyses.  Each call below is a stand-alone demonstration.
library(meta)
### Name: settings.meta
### Title: Print and change default settings to conduct and print or plot
### meta-analyses in R package *meta*.
### Aliases: settings.meta
### ** Examples
#
# Get listing of current settings
#
settings.meta("print")
#
# Meta-analyses using default settings
#
metabin(10, 20, 15, 20)
metaprop(4, 20)
metabin(10, 20, 15, 20, sm="RD")
metaprop(4, 20, sm="PLN")
#
# Change summary measure for R functions metabin and metaprop
# and store old settings (returned invisibly by settings.meta)
#
oldset <- settings.meta(smbin="RD", smprop="PLN")
#
metabin(10, 20, 15, 20)
metaprop(4, 20)
#
# Restore old settings
#
settings.meta(oldset)
#
# Change level used to calculate confidence intervals
# (99%-CI for studies, 99.9%-CI for pooled effects)
#
metagen(1:3, (2:4)/10, sm="MD")
settings.meta(level=0.99, level.comb=0.999)
metagen(1:3, (2:4)/10, sm="MD")
#
# Always print a prediction interval
#
settings.meta(prediction=TRUE)
metagen(1:3, (2:4)/10, sm="MD")
metagen(4:6, (4:2)/10, sm="MD")
#
# Trying to set an unknown argument results in a warning
#
settings.meta(unknownarg=TRUE)
#
# Reset to default settings of R package meta
#
settings.meta("reset")
metabin(10, 20, 15, 20)
metaprop(4, 20)
metagen(1:3, (2:4)/10, sm="MD")
#
# Do not back transform results (e.g. print log odds ratios instead of
# odds ratios, print transformed correlations/proportions instead of
# correlations/proportions)
#
settings.meta(backtransf=FALSE)
metabin(10, 20, 15, 20)
metaprop(4, 20)
metacor(c(0.85, 0.7, 0.95), c(20, 40, 10))
#
# Forest plot using RevMan 5 style
#
settings.meta("revman5")
forest(metagen(1:3, (2:4)/10, sm="MD", comb.fixed=FALSE),
label.left="Favours A", label.right="Favours B",
colgap.studlab = grid::unit(2, "cm"),
colgap.forest.left = grid::unit(0.2, "cm"))
#
# Forest plot using JAMA style
#
settings.meta("jama")
forest(metagen(1:3, (2:4)/10, sm="MD", comb.fixed=FALSE),
label.left="Favours A", label.right="Favours B",
colgap.studlab = grid::unit(2, "cm"),
colgap.forest.left = grid::unit(0.2, "cm"))
#
# Use slightly different layout for confidence intervals
# (especially useful if upper confidence limit can be negative)
#
settings.meta(CIseparator=" - ")
forest(metagen(-(1:3), (2:4)/10, sm="MD", comb.fixed=FALSE),
label.left="Favours A", label.right="Favours B",
colgap.studlab = grid::unit(2, "cm"),
colgap.forest.left = grid::unit(0.2, "cm"))
# Restore old settings
#
settings.meta(oldset)
| /data/genthat_extracted_code/meta/examples/settings.meta.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,470 | r | library(meta)
### Name: settings.meta
### Title: Print and change default settings to conduct and print or plot
### meta-analyses in R package *meta*.
### Aliases: settings.meta
### ** Examples
# (Extracted example script; assumes library(meta) has been loaded.)
#
# Get listing of current settings
#
settings.meta("print")
#
# Meta-analyses using default settings
#
metabin(10, 20, 15, 20)
metaprop(4, 20)
metabin(10, 20, 15, 20, sm="RD")
metaprop(4, 20, sm="PLN")
#
# Change summary measure for R functions metabin and metaprop
# and store old settings (returned invisibly by settings.meta)
#
oldset <- settings.meta(smbin="RD", smprop="PLN")
#
metabin(10, 20, 15, 20)
metaprop(4, 20)
#
# Restore old settings
#
settings.meta(oldset)
#
# Change level used to calculate confidence intervals
# (99%-CI for studies, 99.9%-CI for pooled effects)
#
metagen(1:3, (2:4)/10, sm="MD")
settings.meta(level=0.99, level.comb=0.999)
metagen(1:3, (2:4)/10, sm="MD")
#
# Always print a prediction interval
#
settings.meta(prediction=TRUE)
metagen(1:3, (2:4)/10, sm="MD")
metagen(4:6, (4:2)/10, sm="MD")
#
# Trying to set an unknown argument results in a warning
#
settings.meta(unknownarg=TRUE)
#
# Reset to default settings of R package meta
#
settings.meta("reset")
metabin(10, 20, 15, 20)
metaprop(4, 20)
metagen(1:3, (2:4)/10, sm="MD")
#
# Do not back transform results (e.g. print log odds ratios instead of
# odds ratios, print transformed correlations/proportions instead of
# correlations/proportions)
#
settings.meta(backtransf=FALSE)
metabin(10, 20, 15, 20)
metaprop(4, 20)
metacor(c(0.85, 0.7, 0.95), c(20, 40, 10))
#
# Forest plot using RevMan 5 style
#
settings.meta("revman5")
forest(metagen(1:3, (2:4)/10, sm="MD", comb.fixed=FALSE),
label.left="Favours A", label.right="Favours B",
colgap.studlab = grid::unit(2, "cm"),
colgap.forest.left = grid::unit(0.2, "cm"))
#
# Forest plot using JAMA style
#
settings.meta("jama")
forest(metagen(1:3, (2:4)/10, sm="MD", comb.fixed=FALSE),
label.left="Favours A", label.right="Favours B",
colgap.studlab = grid::unit(2, "cm"),
colgap.forest.left = grid::unit(0.2, "cm"))
#
# Use slightly different layout for confidence intervals
# (especially useful if upper confidence limit can be negative)
#
settings.meta(CIseparator=" - ")
forest(metagen(-(1:3), (2:4)/10, sm="MD", comb.fixed=FALSE),
label.left="Favours A", label.right="Favours B",
colgap.studlab = grid::unit(2, "cm"),
colgap.forest.left = grid::unit(0.2, "cm"))
# Restore old settings
#
settings.meta(oldset)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/markerDB.R
\name{getSpeciesInfo}
\alias{getSpeciesInfo}
\title{Get information about a species in MarkerDB}
\usage{
getSpeciesInfo(species)
}
\arguments{
\item{species}{Character variable of species name}
}
\value{
List of species annotations
}
\description{
This function retrieves information about a specified species in MarkerDB.
The user not only gets the names of all cell types known within this species,
but also its Latin and ENSEMBL names.
}
| /man/getSpeciesInfo.Rd | no_license | derpylz/markerDB | R | false | true | 528 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/markerDB.R
\name{getSpeciesInfo}
\alias{getSpeciesInfo}
\title{Get information about a species in MarkerDB}
\usage{
getSpeciesInfo(species)
}
\arguments{
\item{species}{Character variable of species name}
}
\value{
List of species annotations
}
\description{
This function retrieves information about a specified species in MarkerDB.
The user not only gets the names of all cell types known within this species,
but also its Latin and ENSEMBL names.
}
|
\name{publish_github}
\alias{publish_github}
\title{Publish slide deck to Github}
\usage{
publish_github(repo, username = getOption("github.user"))
}
\arguments{
\item{repo}{github reponame}
\item{username}{github username}
\item{gitHost}{github host}
}
\description{
You will need \code{git} installed on your computer and a
\code{github} account. In addition, you will need \code{SSH}
access to \code{github}. See
\url{https://help.github.com/articles/generating-ssh-keys}
on how to set up \code{SSH} access
}
\details{
Login with your github account and create a new
repository
\url{https://help.github.com/articles/creating-a-new-repository}.
Note that Github will prompt you to add a README file,
but just use the defaults so that your repo is empty. You
will need to have \code{git} installed on your computer
and be able to push to \code{github} using SSH
}
\seealso{
Other publish: \code{\link{publish}},
\code{\link{publish_dropbox}}
}
| /man/publish_github.Rd | no_license | dossett/slidify | R | false | false | 981 | rd | \name{publish_github}
\alias{publish_github}
\title{Publish slide deck to Github}
\usage{
publish_github(repo, username = getOption("github.user"))
}
\arguments{
\item{repo}{github reponame}
\item{username}{github username}
\item{gitHost}{github host}
}
\description{
You will need \code{git} installed on your computer and a
\code{github} account. In addition, you will need \code{SSH}
access to \code{github}. See
\url{https://help.github.com/articles/generating-ssh-keys}
on how to set up \code{SSH} access
}
\details{
Login with your github account and create a new
repository
\url{https://help.github.com/articles/creating-a-new-repository}.
Note that Github will prompt you to add a README file,
but just use the defaults so that your repo is empty. You
will need to have \code{git} installed on your computer
and be able to push to \code{github} using SSH
}
\seealso{
Other publish: \code{\link{publish}},
\code{\link{publish_dropbox}}
}
|
# Demonstrations of if / else branching on draws from the standard normal
# distribution:  -2 -- -1 -- 0 -- 1 -- 2 (number line).
# rnorm(1) returns a single value that is usually close to 0, so every branch
# below is reachable across runs.
rnorm(1)

# --- Two-way split -----------------------------------------------------------
# Guard the cleanup: a bare rm(answer) emits a warning when 'answer' does not
# exist yet (e.g. the first run in a fresh session).
if (exists("answer")) rm(answer)
x <- rnorm(1)
if (x > 1) {
  answer <- "Greater than 1"
} else {
  answer <- "Less than or equal to 1"
}

# --- Three-way split via a nested if/else ------------------------------------
# Branches: x > 1, then -1 <= x <= 1, otherwise x < -1.
if (exists("answer")) rm(answer)
x <- rnorm(1)
if (x > 1) {
  answer <- "Greater than 1"
} else {
  if (x >= -1) {
    answer <- "Between -1 and 1"
  } else {
    answer <- "Less than -1"
  }
}

# --- Same three-way split as a flat else-if chain ----------------------------
# Equivalent logic with one less nesting level; more concise.
if (exists("answer")) rm(answer)
x <- rnorm(1)
if (x > 1) {
  answer <- "Greater than 1"
} else if (x >= -1) {
  answer <- "Between -1 and 1"
} else {
  answer <- "Less than -1"
}
| /6_ifElseStatement.R | no_license | tlaxson14/Rprojects | R | false | false | 1,308 | r | #Normal distribution line
# Standard-normal number line: roughly -2 -- -1 -- 0 -- 1 -- 2.
# rnorm(1) draws one value from N(0, 1), so results cluster near 0 and every
# branch below is reachable across runs.
rnorm(1)
# Two-way split: remove any stale 'answer', draw x, then branch on x > 1.
# NOTE(review): rm(answer) warns if 'answer' does not exist yet (first run in
# a fresh session).
rm(answer)
x <- rnorm(1)
if(x > 1){
answer <- "Greater than 1"
} else {
answer <- "Less than or equal to 1"
}
# Three-way split using a nested if/else inside the else branch:
# x > 1, then -1 <= x <= 1, otherwise x < -1.
rm(answer)
x <- rnorm(1)
if(x > 1){
answer <- "Greater than 1"
} else{
if(x >= -1){
answer <- "Between -1 and 1"
} else {
answer <- "Less than -1"
}
}
# Same three-way split written as a flat else-if chain -- equivalent logic,
# one less nesting level, more concise.
rm(answer)
x <- rnorm(1)
if(x > 1){
answer <- "Greater than 1"
} else if(x >= -1){
answer <- "Between -1 and 1"
} else{
answer <- "Less than -1"
}
|
# Packages ----------------------------------------------------------------
library(dplyr)
library(ggplot2)
library(ggthemes)
library(hrbrthemes)

# Input data ---------------------------------------------------------------
int <- readxl::read_xlsx('internacao.xlsx')

# Derived variables - admissions -------------------------------------------
## length of stay in days (inclusive of both endpoints, hence the +1)
int$dtIncioInternacao <- as.Date(int$dtIncioInternacao)
int$dtFimInternacaoReal <- as.Date(int$dtFimInternacaoReal)
# (was unit = 'days': spell out 'units' to avoid partial argument matching)
int$dias_internado <- difftime(int$dtFimInternacaoReal, int$dtIncioInternacao, units = 'days')
int$dias_internado <- as.numeric(int$dias_internado)
int$dias_internado <- int$dias_internado + 1

## number of admissions per person
qt_int <- int %>%
  dplyr::group_by(IdPessoa) %>%
  dplyr::summarise(qtde_int = sum(!is.na(unique(IdInternacao))))
int <- int %>%
  dplyr::left_join(qt_int, by = "IdPessoa")

## readmission flag: keep only people with more than one admission
## (the original piped distinct() twice; once is sufficient)
int_auxiliar <- int %>%
  dplyr::select(c(IdPessoa, IdInternacao, dtIncioInternacao, dtFimInternacaoReal, qtde_int)) %>%
  dplyr::distinct() %>%
  dplyr::filter(qtde_int > 1)

#################### add a column with the date of the previous discharge ####################
int_auxiliar <- int_auxiliar[order(int_auxiliar$IdPessoa, int_auxiliar$dtIncioInternacao),]
# seq_len() is safe for zero-row inputs (1:nrow would yield c(1, 0))
int_auxiliar$indice <- seq_len(nrow(int_auxiliar))
int_auxiliar_antes <- data.frame(IdPessoa=int_auxiliar$IdPessoa,indice=int_auxiliar$indice+1, dtFimInternacaoRealAnterior=int_auxiliar$dtFimInternacaoReal)
# FIX(review): the original call also passed c("dtFimInternacaoRealAnterior")
# positionally, which dplyr matched to the 'copy' argument -- it was not part
# of the join specification and has been removed.
int_auxiliar2 <- int_auxiliar %>%
  dplyr::left_join(int_auxiliar_antes, by = c("IdPessoa"="IdPessoa", "indice"="indice"))
##############################################################################################
# days between this admission's start and the previous discharge
int_auxiliar2$dias <- difftime(int_auxiliar2$dtIncioInternacao, int_auxiliar2$dtFimInternacaoRealAnterior)
# flag = 1 when readmitted within 30 days of the previous discharge
int_auxiliar2$flag <- ifelse(int_auxiliar2$dias < 31, 1, 0)

# admission history plot: one segment per admission, one row per person
int_auxiliar2$IdPessoa <- as.character(int_auxiliar2$IdPessoa)
ggplot(int_auxiliar2) +
  geom_segment( aes(x=IdPessoa, xend=IdPessoa, y=dtFimInternacaoReal, yend=dtIncioInternacao), color="black") +
  geom_point( aes(x=IdPessoa, y=dtIncioInternacao), color=rgb(0.2,0.7,0.1,0.5), size=3 ) +
  geom_point( aes(x=IdPessoa, y=dtFimInternacaoReal), color=rgb(0.7,0.2,0.1,0.5), size=3 ) +
  coord_flip() +
  theme_ipsum() +
  theme(
    legend.position = "none",
  ) +
  xlab("") +
  ylab("Value of Y")
| /historias.r | no_license | luizgonzagasilva/Probabilidade-e-Estatistica | R | false | false | 2,576 | r |
# Packages ----------------------------------------------------------------
library(dplyr)
library(ggplot2)
library(ggthemes)
library(hrbrthemes)
# Input data ---------------------------------------------------------------
int <- readxl::read_xlsx('internacao.xlsx')
# Derived variables - admissions -------------------------------------------
## length of stay in days (inclusive of both endpoints, hence the +1 below)
int$dtIncioInternacao <- as.Date(int$dtIncioInternacao)
int$dtFimInternacaoReal <- as.Date(int$dtFimInternacaoReal)
int$dias_internado <- difftime(int$dtFimInternacaoReal, int$dtIncioInternacao, unit = 'days')
int$dias_internado <- as.numeric(int$dias_internado)
int$dias_internado <- int$dias_internado + 1
## number of admissions per person
qt_int <- int %>%
dplyr::group_by(IdPessoa) %>%
dplyr::summarise(qtde_int = sum(!is.na(unique(IdInternacao))))
int <- int %>%
dplyr::left_join(qt_int, by = "IdPessoa")
## readmission flag: keep only people with more than one admission
## NOTE(review): distinct() is piped twice below; the second call is redundant
int_auxiliar <- int %>%
dplyr::select(c(IdPessoa, IdInternacao, dtIncioInternacao, dtFimInternacaoReal, qtde_int)) %>%
dplyr::distinct() %>%
dplyr::distinct() %>%
dplyr::filter(qtde_int > 1)
################################ add a column with the date of the previous discharge ################################
int_auxiliar <- int_auxiliar[order(int_auxiliar$IdPessoa, int_auxiliar$dtIncioInternacao),]
int_auxiliar$indice <- 1:nrow(int_auxiliar)
int_auxiliar_antes = data.frame(IdPessoa=int_auxiliar$IdPessoa,indice=int_auxiliar$indice+1, dtFimInternacaoRealAnterior=int_auxiliar$dtFimInternacaoReal)
# NOTE(review): the positional argument c("dtFimInternacaoRealAnterior") below
# is matched to left_join's 'copy' parameter, not the join spec -- likely
# unintended; verify and remove.
int_auxiliar2 <- int_auxiliar %>%
dplyr::left_join(int_auxiliar_antes, c("dtFimInternacaoRealAnterior"), by = c("IdPessoa"="IdPessoa", "indice"="indice"))
#################################################################################################################
# days between this admission's start and the previous discharge
int_auxiliar2$dias = difftime(int_auxiliar2$dtIncioInternacao, int_auxiliar2$dtFimInternacaoRealAnterior)
# flag = 1 when readmitted within 30 days of the previous discharge
int_auxiliar2$flag <- ifelse(int_auxiliar2$dias < 31, 1, 0)
# admission history plot: one segment per admission, one row per person
int_auxiliar2$IdPessoa = as.character(int_auxiliar2$IdPessoa)
ggplot(int_auxiliar2) +
geom_segment( aes(x=IdPessoa, xend=IdPessoa, y=dtFimInternacaoReal, yend=dtIncioInternacao), color="black") +
geom_point( aes(x=IdPessoa, y=dtIncioInternacao), color=rgb(0.2,0.7,0.1,0.5), size=3 ) +
geom_point( aes(x=IdPessoa, y=dtFimInternacaoReal), color=rgb(0.7,0.2,0.1,0.5), size=3 ) +
coord_flip()+
theme_ipsum() +
theme(
legend.position = "none",
) +
xlab("") +
ylab("Value of Y")
|
###############################################################################
# Mosaics MODIS MCD43A4 (500m Nadir BRDF-Adjusted Reflectance) product to
# cover the spatial extent of the ZOI/CSA/PA boundary of each team site.
#
# Requires a GDAL installation that supports HDF4 files - on Windows, see
# OSGEO4W to meet this dependency.
#
# FIX(review): the foreach body referenced three undefined variables ('n' in
# the temp VRT name, 'dstfile' passed to gdalwarp, and 'vrt_files' in the
# cleanup), which made the loop error at run time; they are defined/corrected
# below.
###############################################################################
library(rgdal)
library(raster)
library(stringr)
library(gdalUtils)
library(rgeos)
library(gfcanalysis) # for utm_zone
library(doParallel)
library(foreach)

n_cpus <- 12
overwrite <- TRUE
registerDoParallel(n_cpus)

zoi_folder <- '/localdisk/home/azvoleff/ZOI_CSA_PAs'
in_base_dir <- '/localdisk/home/azvoleff/MODIS_NBAR_Reflectance'
out_base_dir <- '/localdisk/home/azvoleff/MODIS_NBAR_Reflectance'
in_folder <- file.path(in_base_dir, 'ORIGINALS')
out_folder <- file.path(in_base_dir, 'ZOI_Crops')

hdfs <- dir(in_folder, pattern='.hdf$')
tile_key <- read.csv('TEAM_Site_MODIS_Tiles.csv')

# One pass per team site: build the site AOI, locate its MODIS tiles, then
# mosaic/reproject/crop each acquisition date in parallel.
for (sitecode in unique(tile_key$sitecode)) {
  timestamp()
  message('Processing ', sitecode, '...')
  site_rows <- tile_key[tile_key$sitecode == sitecode, ]
  tile_ids <- paste0('h', sprintf('%02i', site_rows$h),
                     'v', sprintf('%02i', site_rows$v))
  # loads 'aois' (the ZOI/CSA/PA polygons for this site)
  load(file.path(zoi_folder, paste0(sitecode, '_ZOI_CSA_PA.RData')))
  # AOI = convex hull of the polygons, projected to UTM, with a 5 km buffer
  aoi <- gConvexHull(aois)
  aoi <- spTransform(aoi, CRS(utm_zone(aoi, proj4string=TRUE)))
  aoi <- gBuffer(aoi, width=5000)
  t_srs <- proj4string(aoi)
  te <- as.numeric(bbox(aoi))
  tile_regex <- paste(paste0('(', tile_ids, ')'), collapse='|')
  tiles <- hdfs[grepl(tile_regex, hdfs)]
  if (length(tiles) == 0) {
    stop('no tiles found')
  }
  product <- gsub('[.]', '', str_extract(tiles, '^[a-zA-Z0-9]*[.]'))
  if (length(unique(product)) != 1) {
    stop('tiles are from more than one MODIS product')
  }
  product <- product[1]
  dates <- unique(as.Date(str_extract(tiles, '[0-9]{7}'), '%Y%j'))
  foreach(this_date=iter(dates),
          .packages=c('raster', 'gdalUtils', 'stringr'),
          .inorder=FALSE) %dopar% {
    message(this_date)
    tiles_by_date <- tiles[grepl(format(this_date, '%Y%j'), tiles)]
    srcfiles <- file.path(in_folder, tiles_by_date)
    out_base <- file.path(out_folder,
                          paste(product, sitecode,
                                format(this_date, '%Y%j'), sep='_'))
    subdatasets <- get_subdatasets(srcfiles[[1]])
    band_names <- data.frame(band=seq(1, length(subdatasets)),
        name=gsub(':', '', str_extract(subdatasets, ':[a-zA-Z0-9_]*$')))
    write.csv(band_names, file=paste0(out_base, '_bandnames.csv'), row.names=FALSE)
    # First build a VRT with all the bands in the HDF file (this mosaics
    # the tiles, but with delayed computation - the actual mosaicing
    # computations won't take place until the gdalwarp line below)
    vrt_file <- paste0(out_base, '_temp.vrt')  # was paste0(..., '_temp', n, '.vrt') with undefined n
    gdalbuildvrt(srcfiles, vrt_file, separate=TRUE)
    # Mosaic, reproject, and crop vrts
    dstfile <- paste0(out_base, '.tif')  # was undefined; GeoTIFF output per date
    gdalwarp(vrt_file, dstfile, t_srs=t_srs, te=te,
             tr=c(500, 500), r='cubicspline', overwrite=overwrite)
    # Delete the temp file
    unlink(vrt_file)  # was 'vrt_files' (undefined)
  }
}
| /3_mosaic_MODIS_MCD43A4.R | no_license | Jianghao/MODIS_MCD43A4 | R | false | false | 3,353 | r | ###############################################################################
# Mosaics MODIS MCD43A4 (500m Nadir BRDF-Adjusted Reflectance ) product to
# cover the spatial extent of the ZOI/CSA/PA boundary of each team site.
#
# Requires a GDAL installation that supports HDF4 files - on Windows, see
# OSGEO4W to meet this dependency.
###############################################################################
library(rgdal)
library(raster)
library(stringr)
library(gdalUtils)
library(rgeos)
library(gfcanalysis) # for utm_zone
library(doParallel)
library(foreach)
# number of parallel workers for the foreach() loop below
n_cpus <- 12
overwrite <- TRUE
registerDoParallel(n_cpus)
# input/output locations
zoi_folder <- '/localdisk/home/azvoleff/ZOI_CSA_PAs'
in_base_dir <- '/localdisk/home/azvoleff/MODIS_NBAR_Reflectance'
out_base_dir <- '/localdisk/home/azvoleff/MODIS_NBAR_Reflectance'
in_folder <- file.path(in_base_dir, 'ORIGINALS')
out_folder <- file.path(in_base_dir, 'ZOI_Crops')
hdfs <- dir(in_folder, pattern='.hdf$')
tile_key <- read.csv('TEAM_Site_MODIS_Tiles.csv')
# One pass per team site: build the site AOI, locate its MODIS tiles, then
# mosaic/reproject/crop each acquisition date in parallel.
for (sitecode in unique(tile_key$sitecode)) {
timestamp()
message('Processing ', sitecode, '...')
site_rows <- tile_key[tile_key$sitecode == sitecode, ]
tile_ids <- paste0('h', sprintf('%02i', site_rows$h),
'v', sprintf('%02i', site_rows$v))
# loads 'aois' (the ZOI/CSA/PA polygons for this site) into the workspace
load(file.path(zoi_folder, paste0(sitecode, '_ZOI_CSA_PA.RData')))
# AOI = convex hull of the polygons, projected to UTM, with a 5 km buffer
aoi <- gConvexHull(aois)
aoi <- spTransform(aoi, CRS(utm_zone(aoi, proj4string=TRUE)))
aoi <- gBuffer(aoi, width=5000)
t_srs <- proj4string(aoi)
te <- as.numeric(bbox(aoi))
tile_regex <- paste(paste0('(', tile_ids, ')'), collapse='|')
tiles <- hdfs[grepl(tile_regex, hdfs)]
if (length(tiles) == 0) {
stop('no tiles found')
}
product <- gsub('[.]', '', str_extract(tiles, '^[a-zA-Z0-9]*[.]'))
if (length(unique(product)) != 1) {
stop('tiles are from more than one MODIS product')
}
product <- product[1]
dates <- unique(as.Date(str_extract(tiles, '[0-9]{7}'), '%Y%j'))
foreach(this_date=iter(dates),
.packages=c('raster', 'gdalUtils', 'stringr'),
.inorder=FALSE) %dopar% {
message(this_date)
tiles_by_date <- tiles[grepl(format(this_date, '%Y%j'), tiles)]
srcfiles <- file.path(in_folder, tiles_by_date)
out_base <- file.path(out_folder,
paste(product, sitecode,
format(this_date, '%Y%j'), sep='_'))
subdatasets <- get_subdatasets(srcfiles[[1]])
band_names <- data.frame(band=seq(1, length(subdatasets)),
name=gsub(':', '', str_extract(subdatasets, ':[a-zA-Z0-9_]*$')))
write.csv(band_names, file=paste0(out_base, '_bandnames.csv'), row.names=FALSE)
# First build a VRT with all the bands in the HDF file (this mosaics
# the tiles, but with delayed computation - the actual mosaicing
# computations won't take place until the gdalwarp line below)
# NOTE(review): 'n' below is never defined in this script -- runtime error; verify
vrt_file <- paste0(out_base, '_temp', n, '.vrt')
gdalbuildvrt(srcfiles, vrt_file, separate=TRUE)
# Mosaic, reproject, and crop vrts
# NOTE(review): 'dstfile' is never defined in this script -- verify intended output path
gdalwarp(vrt_file, dstfile, t_srs=t_srs, te=te,
tr=c(500, 500), r='cubicspline', overwrite=overwrite)
# Delete the temp files
# NOTE(review): 'vrt_files' is undefined; probably should be 'vrt_file'
unlink(vrt_files)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{optimalAssignment}
\alias{optimalAssignment}
\title{Optimal Assignment Algorithm}
\usage{
optimalAssignment(rankings, leaders, minGroupSize = 1L, maxGroupSize = -1L)
}
\arguments{
\item{rankings}{[NumericMatrix]: a matrix of rankings for each student. The
ith row corresponds to student i, and the jth column corresponds to the student
in position j on any student's ranking list. If there are N students, then each
student should have an ID in the range [1,N], which is used both as the row index
(to refer to a student's rankings) and entries of the matrix. For example, if
rankings[i,j] = k, then student i has placed student k in position j on i's ranking.}
\item{leaders}{[int]: the number of leaders to pick; equivalently, the number
of groups to form.}
\item{minGroupSize}{[int]: the minimum number of students in a group. NOTE:
this value does include the group leader. So if minGroupSize = 4, then every
group must consist of 1 leader and at least 3 other students.}
\item{maxGroupSize}{[int]: the maximum number of students in a group. NOTE:
this value does include the group leader. So if maxGroupSize = 6, then every
group must consist of 1 leader and at most 5 other students.}
}
\description{
Optimal Assignment Algorithm
}
| /man/optimalAssignment.Rd | permissive | hkunda/GroupAssignment | R | false | true | 1,339 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{optimalAssignment}
\alias{optimalAssignment}
\title{Optimal Assignment Algorithm}
\usage{
optimalAssignment(rankings, leaders, minGroupSize = 1L, maxGroupSize = -1L)
}
\arguments{
\item{rankings}{[NumericMatrix]: a matrix of rankings for each student. The
ith row corresponds to student i, and the jth column corresponds to the student
in position j on any student's ranking list. If there are N students, then each
student should have an ID in the range [1,N], which is used both as the row index
(to refer to a student's rankings) and entries of the matrix. For example, if
rankings[i,j] = k, then student i has placed student k in position j on i's ranking.}
\item{leaders}{[int]: the number of leaders to pick; equivalently, the number
of groups to form.}
\item{minGroupSize}{[int]: the minimum number of students in a group. NOTE:
this value does include the group leader. So if minGroupSize = 4, then every
group must consist of 1 leader and at least 3 other students.}
\item{maxGroupSize}{[int]: the maximum number of students in a group. NOTE:
this value does include the group leader. So if maxGroupSize = 6, then every
group must consist of 1 leader and at most 5 other students.}
}
\description{
Optimal Assignment Algorithm
}
|
# ---- Data import and initial assessment ----
# Read the customer churn data.
# NOTE(review): hard-coded absolute Windows path; a relative path (or
# file.path()) would make the script portable.
data <- read.csv("F:/DSP Projects/Project 3/Dataset/churn.csv")
data <- data.frame(data)
# Initial assessment: dimensions, structure, summary statistics
dim(data)
str(data)
summary(data)
# Check for missing values, visually (naniar) and numerically
library(naniar)
vis_miss(data)
colSums(is.na(data))
# Recode SeniorCitizen (numeric 0/1 flag) into a categorical factor.
# Vectorized ifelse() replaces the original element-wise loop, which
# coerced the whole column to character on its first assignment and then
# compared strings against 0.6.
da <- data
da$SeniorCitizen <- factor(ifelse(da$SeniorCitizen <= 0.6, "No", "Yes"))
str(da)
head(da$SeniorCitizen)
data <- data.frame(da)
str(data)
# ---- Train/test split (75/25, reproducible) ----
# Column 1 holds the customer ID; it is kept separately from the model
# features so the classifiers never see it.
set.seed(2)
n_train <- floor(0.75 * nrow(data))
train_idx <- sample(seq_len(nrow(data)), n_train)
ID_train <- data[train_idx, 1]
ID_test <- data[-train_idx, 1]
data_train <- data[train_idx, -1]
data_test <- data[-train_idx, -1]
# ---- Exploratory analysis (training data) ----
# Bar plots for the categorical predictors
barplot(table(data_train$gender), col=c('Pink','Blue'), main = "Bar Plot of Gender", xlab="Gender", ylab="Counts")
barplot(table(data_train$SeniorCitizen), col=c('Green','Red'), main = "Bar Plot of Senior Citizen", xlab="Senior Citizen", ylab="Counts")
barplot(table(data_train$Partner), col=c('Pink','Blue'), main = "Bar Plot of Partner", xlab="Partner", ylab="Counts")
barplot(table(data_train$Dependents), col=c('Pink','Blue'), main = "Bar Plot of Dependents", xlab="Dependents", ylab="Counts")
barplot(table(data_train$CallService), col=c('Pink','Blue'), main = "Bar Plot of Call Service", xlab="Call Service", ylab="Counts")
barplot(table(data_train$MultipleConnections), col=c('Red','Blue','Green'), main = "Bar Plot of Multiple Connections", xlab="Multiple Connections", ylab="Counts")
barplot(table(data_train$InternetConnection), col=c('Red','Blue','Green'), main = "Bar Plot of Internet Connection", xlab="Internet Connection", ylab="Counts")
barplot(table(data_train$OnlineSecurity), col=c('Red','Blue','Green'), main = "Bar Plot of Online Security", xlab="Online Security", ylab="Counts")
barplot(table(data_train$OnlineBackup), col=c('Red','Blue','Green'), main = "Bar Plot of Online Back up", xlab="Online Back up", ylab="Counts")
# (typo fixed: "Dewvice" -> "Device" in the x-axis label)
barplot(table(data_train$DeviceProtectionService), col=c('Red','Blue','Green'), main = "Bar Plot of Device Protection Service", xlab="Device Protection Service", ylab="Counts")
barplot(table(data_train$TechnicalHelp), col=c('Red','Blue','Green'), main = "Bar Plot of Technical Help", xlab="Technical Help", ylab="Counts")
barplot(table(data_train$OnlineTV), col=c('Red','Blue','Green'), main = "Bar Plot of Online TV", xlab="Online TV", ylab="Counts")
barplot(table(data_train$OnlineMovies), col=c('Red','Blue','Green'), main = "Bar Plot of Online Movies", xlab="Online Movies", ylab="Counts")
barplot(table(data_train$Agreement), col=c('Red','Blue','Green'), main = "Bar Plot of Agreement", xlab="Agreement", ylab="Counts")
barplot(table(data_train$BillingMethod), col=c('Red','Green'), main = "Bar Plot of Billing Method", xlab="Billing Method", ylab="Counts")
barplot(table(data_train$PaymentMethod), col=c('Red','Yellow','Green','Blue'), main = "Bar Plot of Payment Method", xlab="Payment Method", ylab="Counts")
barplot(table(data_train$Churn), col=c('Red','Blue'), main = "Bar Plot of Churn", xlab="Churn", ylab="Counts")
# Box plots for the continuous predictors
boxplot(data_train$tenure, main="Box Plot for Tenure", horizontal = TRUE)
boxplot(data_train$MonthlyServiceCharges, main="Box Plot for Monthly Service Charges", horizontal = TRUE)
boxplot(data_train$TotalAmount, main="Box Plot for Total Amount", horizontal = TRUE)
# Histograms for the continuous predictors
hist(data_train$tenure, main="Histogram for Tenure", col = "yellow", xlab = "Tenure")
hist(data_train$MonthlyServiceCharges, main="Histogram for Service Charges", col = "orange", xlab = "Service Charges")
hist(data_train$TotalAmount, main="Histogram for Total Amount", col = "red", xlab = "Total Amount")
# Skewness and kurtosis of the continuous predictors.
# NOTE(review): the original called skew(data_train$TotalAmount) here, but
# skew() is not provided by any package this script loads, so that call
# errored at runtime; skewness() from `moments` below covers the same
# statistic. install.packages() is now guarded so the script does not
# reinstall the package on every run.
if (!requireNamespace("moments", quietly = TRUE)) install.packages("moments")
library(moments)
skewness(data_train$tenure) # 0.43, within (-2,2)
kurtosis(data_train$tenure) # 1.83, within (-2,2)
skewness(data_train$MonthlyServiceCharges) # -0.03, within (-2,2)
kurtosis(data_train$MonthlyServiceCharges) # 1.76, within (-2,2)
skewness(data_train$TotalAmount) # 1.15, within (-2,2)
kurtosis(data_train$TotalAmount) # 3.32 -- above the (-2,2) band quoted for the others: TotalAmount is heavy-tailed
# Build a classification tree for Churn on all predictors
library(tree)
tree.data_train <- tree(Churn~., data_train)
summary(tree.data_train)
#Plot the fitted tree with split labels
plot(tree.data_train)
text(tree.data_train, pretty=0)
# Testing on tree: confusion matrix on the held-out test set
tree.pred1 <- predict(tree.data_train, data_test, type = 'class')
table(tree.pred1, data_test$Churn)
# Accuracy= (1392+800)/3084 = 71%
# Pruning the tree: cross-validate tree size using misclassification
# error as the pruning criterion
set.seed(3)
cv.data <- cv.tree(tree.data_train, FUN = prune.misclass)
names(cv.data)
cv.data
# Plotting Error against tree size and against the cost-complexity parameter k
par(mfrow=c(1,2))
plot(cv.data$size, cv.data$dev, type='b', col='red', lwd=2)
plot(cv.data$k, cv.data$dev, type='b', col='red', lwd=2)
# Prune the tree to 4 terminal nodes (best=4)
dev.off()
prune.data <- prune.misclass(tree.data_train, best=4)
plot(prune.data)
text(prune.data, pretty=0)
tree.predp <- predict(prune.data, data_test, type='class')
table(tree.predp, data_test$Churn)
# Accuracy=71%
# ---- Bagging (randomForest with mtry = 10 candidate predictors per split) ----
library(randomForest)
set.seed(1)
bag.data <- randomForest(Churn~., data_train, mtry=10, importance=TRUE)
importance(bag.data)
# The features with the highest MeanDecreaseGini --
# MonthlyServiceCharges, tenure and TotalAmount -- are the most important.
varImpPlot(bag.data, col='green', pch=10, cex=1.25)
bag.data
bag.predict <- predict(bag.data, data_test, type='class')
table(bag.predict, data_test$Churn)
# Accuracy = (1556+1158)/3084 = 88%
# ---- Random forest (mtry = floor(sqrt(10)) = 3) ----
set.seed(1)
rf.data <- randomForest(Churn~., data_train, mtry=3, importance=TRUE)
importance(rf.data)
# Same top-3 variables by MeanDecreaseGini as the bagged model
varImpPlot(rf.data, col='purple', pch=10, cex=1.25)
rf.data
# (renamed from the original's typo "rf..predict"; only used locally)
rf.predict <- predict(rf.data, data_test, type='class')
table(rf.predict, data_test$Churn)
# Accuracy = (1560+1128)/3084 = 87.16%
# Accuracy summary:
#   unpruned tree : 71%
#   pruned tree   : 71%
#   bagging       : 88%
#   random forest : 87.16%
| /churn_analysis_telecom.R | no_license | nithin316/Churn-Analysis-in-Telecom-Industry | R | false | false | 6,684 | r |
# Reading the customer data
data <- read.csv("F:/DSP Projects/Project 3/Dataset/churn.csv")
data <- data.frame(data)
# Initial Assessmenmt
dim(data)
str(data)
summary(data)
# Checking for null values
library(naniar)
vis_miss(data)
colSums(is.na(data))
# Changing SeniorCitizen into categorical variable
da<-data
da$SeniorCitizen[1:4]
for (i in seq(length(da$SeniorCitizen))) {
if(da$SeniorCitizen[i]<=0.6) { da$SeniorCitizen[i]<-"No" }
else { da$SeniorCitizen[i] <-"Yes" }
}
da$SeniorCitizen <- factor(da$SeniorCitizen)
str(da)
head(da$SeniorCitizen)
data <- data.frame(da)
str(data)
# Splitting into training and testing data
set.seed(2)
l <- floor(0.75*nrow(data))
train <- sample(1:nrow(data), l)
ID_train <- data[train,1]
ID_test <- data[-train,1]
data_train <- data[train,-1]
data_test <- data[-train,-1]
# Extrapolatory Analysis
# Bar plot for categorical variables
barplot(table(data_train$gender), col=c('Pink','Blue'), main = "Bar Plot of Gender", xlab="Gender", ylab="Counts")
barplot(table(data_train$SeniorCitizen), col=c('Green','Red'), main = "Bar Plot of Senior Citizen", xlab="Senior Citizen", ylab="Counts")
barplot(table(data_train$Partner), col=c('Pink','Blue'), main = "Bar Plot of Partner", xlab="Partner", ylab="Counts")
barplot(table(data_train$Dependents), col=c('Pink','Blue'), main = "Bar Plot of Dependents", xlab="Dependents", ylab="Counts")
barplot(table(data_train$CallService), col=c('Pink','Blue'), main = "Bar Plot of Call Service", xlab="Call Service", ylab="Counts")
barplot(table(data_train$MultipleConnections), col=c('Red','Blue','Green'), main = "Bar Plot of Multiple Connections", xlab="Multiple Connections", ylab="Counts")
barplot(table(data_train$InternetConnection), col=c('Red','Blue','Green'), main = "Bar Plot of Internet Connection", xlab="Internet Connection", ylab="Counts")
barplot(table(data_train$OnlineSecurity), col=c('Red','Blue','Green'), main = "Bar Plot of Online Security", xlab="Online Security", ylab="Counts")
barplot(table(data_train$OnlineBackup), col=c('Red','Blue','Green'), main = "Bar Plot of Online Back up", xlab="Online Back up", ylab="Counts")
barplot(table(data_train$DeviceProtectionService), col=c('Red','Blue','Green'), main = "Bar Plot of Device Protection Service", xlab="Dewvice Protection Service", ylab="Counts")
barplot(table(data_train$TechnicalHelp), col=c('Red','Blue','Green'), main = "Bar Plot of Technical Help", xlab="Technical Help", ylab="Counts")
barplot(table(data_train$OnlineTV), col=c('Red','Blue','Green'), main = "Bar Plot of Online TV", xlab="Online TV", ylab="Counts")
barplot(table(data_train$OnlineMovies), col=c('Red','Blue','Green'), main = "Bar Plot of Online Movies", xlab="Online Movies", ylab="Counts")
barplot(table(data_train$Agreement), col=c('Red','Blue','Green'), main = "Bar Plot of Agreement", xlab="Agreement", ylab="Counts")
barplot(table(data_train$BillingMethod), col=c('Red','Green'), main = "Bar Plot of Billing Method", xlab="Billing Method", ylab="Counts")
barplot(table(data_train$PaymentMethod), col=c('Red','Yellow','Green','Blue'), main = "Bar Plot of Payment Method", xlab="Payment Method", ylab="Counts")
barplot(table(data_train$Churn), col=c('Red','Blue'), main = "Bar Plot of Churn", xlab="Churn", ylab="Counts")
# Boxplot for continuous variables
boxplot(data_train$tenure, main="Box Plot for Tenure", horizontal = TRUE)
boxplot(data_train$MonthlyServiceCharges, main="Box Plot for Monthly Service Charges", horizontal = TRUE)
boxplot(data_train$TotalAmount, main="Box Plot for Total Amount", horizontal = TRUE)
# Histogram for continuous variables
hist(data_train$tenure, main="Histogram for Tenure", col = "yellow", xlab = "Tenure")
hist(data_train$MonthlyServiceCharges, main="Histogram for Service Charges", col = "orange", xlab = "Service Charges")
hist(data_train$TotalAmount, main="Histogram for Total Amount", col = "red", xlab = "Total Amount")
skew(data_train$TotalAmount)
# SKewness and Kurtosis
install.packages("moments")
library(moments)
skewness(data_train$tenure) # Skewness=0.43, in (-2,2), within the bounds of normal distribution
kurtosis(data_train$tenure) # Kurtosis=1.83, in (-2,2), within the bounds of normal distribution
skewness(data_train$MonthlyServiceCharges) # Skewness=-0.03, in (-2,2), within the bounds of normal distribution
kurtosis(data_train$MonthlyServiceCharges) # Kurtosis=1.76, in (-2,2), within the bounds of normal distribution
skewness(data_train$TotalAmount) # Skewness=1.15, in (-2,2), within the bounds of normal distribution
kurtosis(data_train$TotalAmount) # Kurtosis=3.32, in (-2,2), within the bounds of normal distribution
# Build a tree
library(tree)
tree.data_train <- tree(Churn~., data_train)
summary(tree.data_train)
#Plot
plot(tree.data_train)
text(tree.data_train, pretty=0)
# Testing on tree
tree.pred1 <- predict(tree.data_train, data_test, type = 'class')
table(tree.pred1, data_test$Churn)
# Accuracy= (1392+800)/3084 = 71%
# Pruning the tree
set.seed(3)
cv.data <- cv.tree(tree.data_train, FUN = prune.misclass)
names(cv.data)
cv.data
# Plotting Error
par(mfrow=c(1,2))
plot(cv.data$size, cv.data$dev, type='b', col='red', lwd=2)
plot(cv.data$k, cv.data$dev, type='b', col='red', lwd=2)
# Prune the tree to 4 classes
dev.off()
prune.data <- prune.misclass(tree.data_train, best=4)
plot(prune.data)
text(prune.data, pretty=0)
tree.predp <- predict(prune.data, data_test, type='class')
table(tree.predp, data_test$Churn)
# Accuracy=71%
# Bagging
library(randomForest)
set.seed(1)
bag.data <- randomForest(Churn~., data_train, mtry=10, importance=TRUE)
importance(bag.data)
# As the features that have top 3 MeanDecreaseGini
# MonthlyServiceChargews, tenure and TotalAmount are the most important variables
varImpPlot(bag.data, col='green', pch=10, cex=1.25)
bag.data
bag.predict <- predict(bag.data, data_test, type='class')
table(bag.predict, data_test$Churn)
# Accuracy=(1556+1158)/3084=88%
# Random Forest sqr(10)=mtry=3
set.seed(1)
rf.data <- randomForest(Churn~., data_train, mtry=3, importance=TRUE)
importance(rf.data)
# As the features that have top 3 MeanDecreaseGini
# MonthlyServiceChargews, tenure and TotalAmount are the most important variables
varImpPlot(rf.data, col='purple', pch=10, cex=1.25)
rf.data
rf..predict <- predict(rf.data, data_test, type='class')
table(rf..predict, data_test$Churn)
# Accuracy=(1560+1128)/3084=87.16%
# Accuracy :
# before pruning :71%
# after pruning : 71%
# bagging : 88%
# random forest : 87.16%
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature-scale.R
\name{mean_normalize}
\alias{mean_normalize}
\title{Creates a distribution that will have values between `(-1, 1)` with `μ = 0`.}
\usage{
mean_normalize(x)
}
\description{
Creates a distribution that will have values between `(-1, 1)` with `μ = 0`.
}
| /man/mean_normalize.Rd | no_license | ifrit98/R2deepR | R | false | true | 347 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature-scale.R
\name{mean_normalize}
\alias{mean_normalize}
\title{Creates a distribution that will have values between `(-1, 1)` with `μ = 0`.}
\usage{
mean_normalize(x)
}
\description{
Creates a distribution that will have values between `(-1, 1)` with `μ = 0`.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/edec_stage_1.R
\name{estimate_props_qp}
\alias{estimate_props_qp}
\title{Estimate cell type proportions}
\usage{
estimate_props_qp(meth_bulk_samples, cell_type_specific_meth)
}
\arguments{
\item{meth_bulk_samples}{Matrix of methylation profiles of bulk complex
tissue samples. Columns correspond to different samples and rows correspond
to different loci/probes.}
\item{cell_type_specific_meth}{Matrix of methylation profiles of constituent
cell types. Columns correspond to different cell types and rows correspond
to different loci/probes.}
}
\value{
Matrix with estimated proportions of constituent cell types in each
sample.
}
\description{
This function will estimate the proportions of constituent cell types in
each input sample, given methylation profiles of complex tissue samples and
methylation profiles of constituent cell types.
}
\details{
EDec assumes that the methylation profiles of complex tissue samples
correspond to the linear combination of cell type proportions and
methylation profiles of each cell type. Given the methylation profiles of a
set of complex tissue samples and the methylation profiles of constituent
cell types this function estimates cell type proportions in each sample by
solving constrained least squares problems through quadratic programming. The
constraints are that the proportions of constituent cell types are numbers in
the [0,1] interval and that the proportions of all cell types in each sample
sum up to one.
}
| /man/estimate_props_qp.Rd | no_license | rghu/EDec | R | false | true | 1,548 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/edec_stage_1.R
\name{estimate_props_qp}
\alias{estimate_props_qp}
\title{Estimate cell type proportions}
\usage{
estimate_props_qp(meth_bulk_samples, cell_type_specific_meth)
}
\arguments{
\item{meth_bulk_samples}{Matrix of methylation profiles of bulk complex
tissue samples. Columns correspond to different samples and rows correspond
to different loci/probes.}
\item{cell_type_specific_meth}{Matrix of methylation profiles of constituent
cell types. Columns correspond to different cell types and rows correspond
to different loci/probes.}
}
\value{
Matrix with estimated proportions of constituent cell types in each
sample.
}
\description{
This function will estimate the proportions of constituent cell types in
each input sample, given methylation profiles of complex tissue samples and
methylation profiles of constituent cell types.
}
\details{
EDec assumes that the methylation profiles of complex tissue samples
correspond to the linear combination of cell type proportions and
methylation profiles of each cell type. Given the methylation profiles of a
set of complex tissue samples and the methylation profiles of constituent
cell types this function estimates cell type proportions in each sample by
solving constrained least squares problems through quadratic programming. The
constraints are that the proportions of constituent cell types are numbers in
the [0,1] interval and that the proportions of all cell types in each sample
sum up to one.
}
|
# NOTE(review): the original began with rm(list=ls()), which wipes the
# caller's entire global environment -- an anti-pattern inside scripts.
# Run this file in a fresh R session instead if a clean workspace is needed.
#####################################################################################################
#Load packages
library(maps)      # base map data
library(mapproj)   # map projections
library(mapdata)   # supplementary map databases
library(mapplots)  # plotting utilities for maps
library(maptools)  # tools for spatial objects
library(ggplot2)   # grammar-of-graphics plotting
library(reshape)   # melt() used below for long-format reshaping
#####################################################################################################
#####################################################################################################
#National level data summary: federal highway revenue vs. expenditure over time
fed.highway <- read.csv("inputs/highwayfinances.csv",header=TRUE)
#Plot of federal highway funding/spending (paper version; values scaled to billions)
pdf('figures/highway_finances.pdf')
par(mar=c(6,8,2,2))
plot(fed.highway$year,fed.highway$fueltax/10^6,type="n",xlab="Year",ylab="Annual revenue and expenditure\n for highways (billions of $)",ylim=c(0,max(fed.highway)/10^6),cex.lab=2,xaxt='n',yaxt='n')
grid()
axis(1,las=0,tck=.02,cex.axis=2)
axis(2,las=2,tck=.02,cex.axis=2)
lines(fed.highway$year,fed.highway$total/10^6,lwd=3,col="blue")
lines(fed.highway$year,fed.highway$expenditure/10^6,lwd=3,col="red")
text(x=c(1998.5,2008),y=c(35,43.5),pos=c(2,2),labels=c("Revenue","Expenditures"))
dev.off()
# Copy the figure into the dissertation chapter directory
file.copy('figures/highway_finances.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/highway_finances.pdf')
# Same figure, reformatted for slides (wider aspect, smaller labels)
pdf('figures/forslides/highway_finances.pdf',height=8,width=12)
par(mar=c(6,6,2,2))
plot(fed.highway$year,fed.highway$fueltax/10^6,type="n",xlab="Year",ylab="Annual revenue/spending\n for highways (billions of $)",ylim=c(0,max(fed.highway)/10^6),cex.lab=1.5,xaxt='n',yaxt='n')
grid()
axis(1,las=0,tck=.02)
axis(2,las=2,tck=.02)
lines(fed.highway$year,fed.highway$total/10^6,lwd=3,col="blue")
lines(fed.highway$year,fed.highway$expenditure/10^6,lwd=3,col="red")
text(x=c(1998.5,2008),y=c(35,43.5),pos=c(2,2),labels=c("Revenue","Expenditures"))
dev.off()
#####################################################################################################
#####################################################################################################
#State level data summary
#data import: the first column is the state identifier; columns 2-8 are
# revenue components, columns 10-12 and 14-18 are expenditure components
state.finances <- read.csv("inputs/statefinances.csv",header=TRUE)
state.revenues <- state.finances[,1:8]
state.expenditures <- state.finances[,c(1,10:12,14:18)]
#re-ordering data so that the bar plot goes highest to smallest
state.revenues$state <- reorder(state.revenues$state,rowSums(state.revenues[-1]))
state.expenditures$state <- reorder(state.expenditures$state,rowSums(state.expenditures[-1]))
# Per-state totals and the revenue-minus-expenditure difference
state.revenues.total <- rowSums(state.revenues[,c(2:length(state.revenues))])
state.expenditures.total <- rowSums(state.expenditures[,c(2:length(state.expenditures))])
state.diff <- data.frame('state'=state.revenues$state,'diff'=state.revenues.total-state.expenditures.total)
#function returning top 25
# Return the rows of `input` sorted in decreasing order of the row sums of
# all columns except the first (the state identifier column).
#
# The original implementation appended a temporary `sum` column and then
# dropped the last column. That corrupts the result whenever `input`
# already has a column named `sum`: the assignment overwrites that column
# in place (it does not move it to the end), so `input[-ncol(input)]`
# drops an unrelated column. Ordering directly on rowSums() avoids the
# temporary column entirely.
#
# input: data frame whose first column is an identifier and whose
#        remaining columns are numeric.
# value: the same data frame with rows reordered largest-total first.
top25 <- function(input) {
  input[order(-rowSums(input[-1])), ]
}
# Split states into the top 26 and bottom 25 by total revenue, and subset
# the expenditure and difference tables to match each half.
state.revenues1 <- top25(state.revenues)[1:26,]
state.revenues2 <- top25(state.revenues)[27:51,]
state.expenditures1 <- state.expenditures[state.expenditures$state%in%state.revenues1$state,]
state.expenditures2 <- state.expenditures[state.expenditures$state%in%state.revenues2$state,]
state.diff1 <- state.diff[state.diff$state%in%state.revenues1$state,]
state.diff2 <- state.diff[state.diff$state%in%state.revenues2$state,]
# Re-order the difference tables to follow the revenue ordering
state.diff1 <- state.diff1[order(match(state.diff1$state,state.revenues1$state)),]
state.diff2 <- state.diff2[order(match(state.diff2$state,state.revenues2$state)),]
state.diff1$type <- 'Difference'
state.diff2$type <- 'Difference'
#Melt data tables together for ggplot input
m.state.revenues1 <- melt(state.revenues1,id.vars='state')
m.state.expenditures1 <- melt(state.expenditures1,id.vars='state')
m.state.revenues2 <- melt(state.revenues2,id.vars='state')
m.state.expenditures2 <- melt(state.expenditures2,id.vars='state')
# Zero-valued placeholder rows keep every state present in the Expenditure facet
empty.hold1 <- data.frame('state'=unique(m.state.expenditures1$state),'variable'='none',value=0,type='Expenditure')
empty.hold2 <- data.frame('state'=unique(m.state.expenditures2$state),'variable'='none',value=0,type='Expenditure')
m.state.revenues1$type <- 'Revenue'
m.state.expenditures1$type <- 'Expenditure'
# NOTE(review): the original assigned m.state.diff1$type / m.state.diff2$type
# here, but no such objects were ever created (the molten difference tables
# do not exist), which stopped the script with an "object not found" error.
# state.diff1/state.diff2 already carry the 'Difference' label above, so
# those two lines are removed.
m.state.all1 <- rbind(m.state.revenues1,empty.hold1,m.state.expenditures1)
m.state.revenues2$type <- 'Revenue'
m.state.expenditures2$type <- 'Expenditure'
m.state.all2 <- rbind(m.state.revenues2,empty.hold2,m.state.expenditures2)
# Blues = revenue components, reds/pinks = expenditure components, white = spacer
color.vector <- c('navy','blue3','blue1','dodgerblue3','dodgerblue1','lightblue3','lightblue1','white','brown4','lightsalmon4','firebrick1','indianred2','lightpink3','lightpink2','lightpink','mistyrose2')
#plot of top 25 states broken down by components, separated by rev and exp
pdf('figures/top25_states.pdf',height=9,width=9)
#dev.new(height=6,width=9)
# Stacked horizontal bars per state; Revenue and Expenditure side-by-side facets
ggplot(m.state.all1,aes(x=state,y=value/10^6,fill=variable,width=.8))+
geom_bar(stat='identity',color='black')+
facet_wrap(~type)+
coord_flip()+
xlab("State")+
ylab("Billions of Dollars ($)")+
theme(axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
panel.background=element_rect(fill='white',colour='white'),
panel.grid.major.x=element_line(colour="black"),
panel.grid.minor.x=element_line(colour="white"))+
scale_fill_manual(name="Source",values=color.vector,labels=c("Use Taxes","Tolls","Gen Funds","Misc", "Bonds","Fed Gov","Local Gov","","Federal Highway","Other","Roads/Streets","Maintenance","Admin/Police","Interest","Bonds","Grants"))
dev.off()
file.copy('figures/top25_states.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/top25_states.pdf')
# Revenue-minus-expenditure difference for the top half of states
pdf('figures/top25_states_diff.pdf',height=9,width=6)
ggplot(state.diff1,aes(x=state,y=diff/10^6,width=.8))+
geom_bar(stat='identity')+
facet_wrap(~type)+
coord_flip()+
xlab("State")+
ylab("Billions of Dollars ($)")+
theme(axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
panel.background=element_rect(fill='white',colour='white'),
panel.grid.major.x=element_line(colour="black"),
panel.grid.minor.x=element_line(colour="white"))
dev.off()
file.copy('figures/top25_states_diff.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/top25_states_diff.pdf')
#plot of bottom 25 states broken down by components, separated by rev and exp
pdf('figures/bottom25_states.pdf',height=9,width=9)
#dev.new(height=6,width=9)
ggplot(m.state.all2,aes(x=state,y=value/10^6,fill=variable,width=.8))+
geom_bar(stat='identity',color='black')+
facet_wrap(~type)+
coord_flip()+
xlab("State")+
ylab("Billions of Dollars ($)")+
theme(axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
panel.background=element_rect(fill='white',colour='white'),
panel.grid.major.x=element_line(colour="black"),
panel.grid.minor.x=element_line(colour="white"))+
scale_fill_manual(name="Source",values=color.vector,labels=c("Use Taxes","Tolls","Gen Funds","Misc", "Bonds","Fed Gov","Local Gov","","Federal Highway","Other","Roads/Streets","Maintenance","Admin/Police","Interest","Bonds","Grants"))
dev.off()
file.copy('figures/bottom25_states.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/bottom25_states.pdf')
# Revenue-minus-expenditure difference for the bottom half of states
pdf('figures/bottom25_states_diff.pdf',height=9,width=6)
ggplot(state.diff2,aes(x=state,y=diff/10^6,width=.8))+
geom_bar(stat='identity')+
facet_wrap(~type)+
coord_flip()+
xlab("State")+
ylab("Billions of Dollars ($)")+
theme(axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
panel.background=element_rect(fill='white',colour='white'),
panel.grid.major.x=element_line(colour="black"),
panel.grid.minor.x=element_line(colour="white"))
dev.off()
file.copy('figures/bottom25_states_diff.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/bottom25_states_diff.pdf')
#####################################################################################################
#####################################################################################################
#EV Sales and projections
#load electric vehicle sales (monthly, one column per model); missing
# sales values are replaced with zero
ev.sales <- read.csv('inputs/ev_sales.csv')
ev.sales[is.na(ev.sales)] <- 0
ev.sales$Date <- as.Date(ev.sales$Date,"%m-%d-%Y")
#plot electric vehicle sales by model: one colored line per model
colors.plot <- c("black","blue4","darkgreen","red2","orchid4","yellow1","orange","lightblue2","green","lightpink","violetred","gold","brown","dodgerblue2")
pdf('figures/ev_sales_bymodel.pdf')
par(mar=c(6,6,2,2))
plot(Chevrolet.Volt~Date,ev.sales,type="n",xlab="Date (monthly)",ylab="EV Sales (monthly)",ylim=c(min(ev.sales[,-1]),max(ev.sales[-1])),cex.lab=1.5,xaxt='n',yaxt='n')
axis(1,las=0,tck=.02)
axis(2,las=2,tck=.02)
#axis(1,ev.sales[,1],format(ev.sales$Date,"%m-%Y"),las=2,cex.axis=.5)
# Draw one line per model column (column 1 is the date)
for(i in 2:length(ev.sales)) {
lines(ev.sales[,1],ev.sales[,i],col=colors.plot[i-1],lwd=2.5)
}
legend("topleft",c("Volt (PHEV)","Leaf (BEV)","SmartED (BEV)","i-MiEV (BEV)","ActiveE (BEV)","Prius (PHEV)","Focus (BEV)","Fit (BEV)","Model S (BEV)","RAV4 (BEV)","C-Max Energi (PHEV)","Accord (PHEV)","Fusion Energi (PHEV)","Spark (BEV)"),col=colors.plot,lwd=2,cex=.7)
dev.off()
file.copy('figures/ev_sales_bymodel.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/ev_sales_bymodel.pdf')
#####################################################################################################
#Popular sales: keep four named models and lump the remaining columns into "other"
pop.evsales <- data.frame('date'=ev.sales$Date,'volt'=ev.sales$Chevrolet.Volt,'leaf'=ev.sales$Nissan.Leaf,'prius'=ev.sales$Toyota.Prius.PHV,'model.s'=ev.sales$Tesla.Model.S,'other'=rowSums(ev.sales[,c(4,5,6,8,9,11,12,13,14,15)]))
#plot popular electric vehicle sales by model
colors.plot2 <- c('black','blue4','darkgreen','red2','orange')
pdf('figures/popular_ev_sales_bymodel.pdf',height=5,width=8)
par(mar=c(6,6,2,2))
plot(volt~date,pop.evsales,type="n",xlab="Date (monthly)",ylab="EV Sales (monthly)",ylim=c(min(pop.evsales[,-1]),max(pop.evsales[-1])),cex.lab=1,xaxt='n',yaxt='n')
# Label the x-axis only at January and July of each year
axis(1,las=0,at=pop.evsales$date[format(pop.evsales$date,"%m")=='01'|format(pop.evsales$date,"%m")=='07'],labels=format(pop.evsales$date[format(pop.evsales$date,"%m")=='01'|format(pop.evsales$date,"%m")=='07'],"%m-%Y"),cex=.8,tck=.02)
axis(2,las=2,tck=.02)
#axis(1,ev.sales[,1],format(ev.sales$Date,"%m-%Y"),las=2,cex.axis=.5)
for(i in 2:length(pop.evsales)) {
lines(pop.evsales[,1],pop.evsales[,i],col=colors.plot2[i-1],lwd=2.5)
}
legend("topleft",c("Volt (PHEV)","Leaf (BEV)","Prius (PHEV)","Model S (BEV)","Other"),col=colors.plot2,lwd=2,cex=.7)
dev.off()
file.copy('figures/popular_ev_sales_bymodel.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/popular_ev_sales_bymodel.pdf')
# Same figure in a square format for slides
pdf('figures/forslides/popular_ev_sales_bymodel.pdf',height=6,width=6)
par(mar=c(6,6,2,2))
plot(volt~date,pop.evsales,type="n",xlab="Date (monthly)",ylab="EV Sales (monthly)",ylim=c(min(pop.evsales[,-1]),max(pop.evsales[-1])),cex.lab=1,xaxt='n',yaxt='n')
axis(1,las=0,at=pop.evsales$date[format(pop.evsales$date,"%m")=='01'|format(pop.evsales$date,"%m")=='07'],labels=format(pop.evsales$date[format(pop.evsales$date,"%m")=='01'|format(pop.evsales$date,"%m")=='07'],"%m-%Y"),cex=.8,tck=.02)
axis(2,las=2,tck=.02)
#axis(1,ev.sales[,1],format(ev.sales$Date,"%m-%Y"),las=2,cex.axis=.5)
for(i in 2:length(pop.evsales)) {
lines(pop.evsales[,1],pop.evsales[,i],col=colors.plot2[i-1],lwd=2.5)
}
legend("topleft",c("Volt (PHEV)","Leaf (BEV)","Prius (PHEV)","Model S (BEV)","Other"),col=colors.plot2,lwd=2,cex=.7)
dev.off()
#####################################################################################################
# ---- Extra figures ----
# Total EV sales over time (sum across all model columns)
pdf('figures/ev_sales_total.pdf')
plot(Chevrolet.Volt~Date,ev.sales,type="n",xlab="Date (monthly)",ylab="Total EV Sales",ylim=c(0,max(rowSums(ev.sales[,2:length(ev.sales)]))),xaxt='n',yaxt='n')
axis(1,las=0)
axis(2,las=2)
lines(ev.sales[,1],rowSums(ev.sales[,2:length(ev.sales)]))
dev.off()
file.copy('figures/ev_sales_total.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/ev_sales_total.pdf')
# Load hybrid vehicle sales; missing values replaced with zero
hev.sales <- read.csv('inputs/hybrid_sales.csv')
hev.sales[is.na(hev.sales)] <- 0
hev.sales$Date <- as.Date(hev.sales$Date,"%m/%d/%Y")
# Hybrid vehicle sales by model.
# NOTE(review): the original upper y-limit used max(ev.sales[-1]) -- the EV
# table rather than the HEV table being plotted (apparent copy-paste from
# the EV figure above); both limits now come from hev.sales.
colors.plot3 <- c("blue4","red2")
pdf('figures/hev_sales_bymodel.pdf')
plot(Toyota.Prius~Date,hev.sales,type="n",xlab="Date (monthly)",ylab="HEV Sales",ylim=c(min(hev.sales[,-1]),max(hev.sales[,-1])),xaxt='n',yaxt='n')
axis(1,las=0)
axis(2,las=2)
for(i in 2:length(hev.sales)) {
lines(hev.sales[,1],hev.sales[,i],col=colors.plot3[i-1],lwd=2.5)
}
legend("topleft",c("Prius","Insight"),col=colors.plot3,lwd=2,cex=.7)
dev.off()
file.copy('figures/hev_sales_bymodel.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/hev_sales_bymodel.pdf')
# Total EV vs total HEV sales, aligned by months since market introduction
pdf('figures/combined_sales_total.pdf')
plot(1:length(ev.sales$Chevrolet.Volt),ev.sales$Chevrolet.Volt,type="n",xlab="Months Since Introduction",ylab="Total EV Sales",ylim=c(0,max(rowSums(ev.sales[,2:length(ev.sales)]))),xaxt='n',yaxt='n')
axis(1,las=0)
axis(2,las=2)
lines(1:length(rowSums(ev.sales[,2:length(ev.sales)])),rowSums(ev.sales[,2:length(ev.sales)]),col="blue4",lwd=2.5)
lines(1:length(rowSums(hev.sales[,2:length(hev.sales)])),rowSums(hev.sales[,2:length(hev.sales)]),col="red2",lwd=2.5)
legend("topleft",c("EVs","HEVs"),col=c("blue4","red2"),lwd=2)
dev.off()
file.copy('figures/combined_sales_total.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/combined_sales_total.pdf')
#####################################################################################################
#eia projection: sales projections split into 2012 and 2013 column groups
# (eia.2012 is built but not used in this excerpt)
projections <- read.csv('inputs/eia_projection.csv')
eia.2012 <- projections[,1:5]
eia.2013 <- projections[,c(1,6:9)]
colnames(eia.2012)=c("Year","BEV100","PHEV10","PHEV40","FFV")
colnames(eia.2013)=c("Year","BEV100","PHEV10","PHEV40","FFV")
#annual historical EV sales aggregated from the monthly ev.sales table
# (rows 1:13 are summed as 2011, rows 14:25 as 2012)
annual.bevsales <- data.frame('year'=c(2011,2012),'sales'=c(sum(rowSums(ev.sales[-1][,c(2:5,7:14)])[1:13]),sum(rowSums(ev.sales[-1][,c(2:5,7:14)])[14:25])))
annual.phev10sales <- data.frame('year'=c(2011,2012),'sales'=c(sum(ev.sales[,7][1:13]),sum(ev.sales[,7][14:25])))
annual.phev40sales <- data.frame('year'=c(2011,2012),'sales'=c(sum(ev.sales[,2][1:13]),sum(ev.sales[,2][14:25])))
pdf('figures/eia_forecast2013.pdf')
par(mar=c(6,6,2,2))
plot(1,type="n",xlab="Year",ylab="",lwd=3.5,col="dodgerblue4",xlim=c(2011,2025),ylim=c(0,max(eia.2013[,2:4]/1000)),xaxt="n",yaxt="n",cex.lab=2)
grid()
axis(1,cex.axis=1,las=0,at=2011:2025,labels=2011:2025,tck=.02,cex.axis=2)
axis(2,cex.axis=1,las=2,tck=.02,cex.axis=2)
mtext('Sales (thousands)',side=2,line=4,cex=2)
# Vertical line separates historical (left) from projected (right) sales
abline(v=2012)
# 2013-vintage projections in blue, one line type per vehicle class
lines(eia.2013$Year,eia.2013$BEV100/1000,lty=1,lwd=3.5,col="dodgerblue4")
lines(eia.2013$Year,eia.2013$PHEV10/1000,lty=5,lwd=3.5,col="dodgerblue4")
lines(eia.2013$Year,eia.2013$PHEV40/1000,lty=3,lwd=3.5,col="dodgerblue4")
# Historical 2011-2012 sales in red, matching line types
lines(2011:2012,annual.bevsales$sales/1000,lty=1,lwd=3.5,col='red')
lines(2011:2012,annual.phev10sales$sales/1000,lty=5,lwd=3.5,col='red')
lines(2011:2012,annual.phev40sales$sales/1000,lty=3,lwd=3.5,col='red')
text(x=c(2019,2015,2021.8),y=c(15,30,120),pos=c(4,4,4),labels=c("BEV-100","PHEV-10","PHEV-40"))
text(x=2012,y=150,pos=2,labels="Historical Sales",srt=90)
text(x=2012,y=150,pos=4,labels="Projected Sales",srt=270)
arrows(2012,100,2011,100,length=.1,lwd=1.5)
arrows(2012,155,2013,155,length=.1,lwd=1.5)
dev.off()
file.copy('figures/eia_forecast2013.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/eia_forecast2013.pdf')
pdf('figures/forslides/eia_forecast2013.pdf',height=6,width=6)
par(mar=c(6,6,2,2))
plot(1,type="n",xlab="Year",ylab="Sales (thousands)",lwd=3.5,col="dodgerblue4",xlim=c(2011,2025),ylim=c(0,max(eia.2013[,2:4]/1000)),xaxt="n",yaxt="n",cex.lab=1.5)
grid()
axis(1,cex.axis=1,las=0,at=2011:2025,labels=2011:2025)
axis(2,cex.axis=1,las=2)
abline(v=2012)
lines(eia.2013$Year,eia.2013$BEV100/1000,lty=1,lwd=3.5,col="dodgerblue4")
lines(eia.2013$Year,eia.2013$PHEV10/1000,lty=5,lwd=3.5,col="dodgerblue4")
lines(eia.2013$Year,eia.2013$PHEV40/1000,lty=3,lwd=3.5,col="dodgerblue4")
lines(2011:2012,annual.bevsales$sales/1000,lty=1,lwd=3.5,col='red')
lines(2011:2012,annual.phev10sales$sales/1000,lty=5,lwd=3.5,col='red')
lines(2011:2012,annual.phev40sales$sales/1000,lty=3,lwd=3.5,col='red')
text(x=c(2019,2015,2021.8),y=c(15,30,120),pos=c(4,4,4),labels=c("BEV-100","PHEV-10","PHEV-40"))
text(x=2012,y=150,pos=2,labels="Historical Sales",srt=90)
text(x=2012,y=150,pos=4,labels="Projected Sales",srt=270)
arrows(2012,100,2011,100,length=.1,lwd=1.5)
arrows(2012,155,2013,155,length=.1,lwd=1.5)
dev.off()
#####################################################################################################
#Policy forecasts
# Published EV-adoption forecasts (one column per source) drawn as market
# share over time, with a hand-drawn grey wedge enclosing them.
forecasts2 <- read.csv('inputs/forecasts_v2.csv')
grey.shade = rgb(0,0,0,alpha=0.2,maxColorValue=1)
pdf('figures/forecasts.pdf',height=6,width=9)
par(mar=c(4,6,2,2))
plot(x=forecasts2$Year,y=1:nrow(forecasts2),type='n',xlab=NA,ylab=NA,xaxt='n',yaxt='n',xlim=c(2008,2055),ylim=c(0,1))
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
mtext('Year',side=1,line=2.5,cex=1.5)
mtext('Market Share',side=2,line=3.5,cex=1.5)
# grey envelope around the spread of forecasts (coordinates chosen by eye)
polygon(x=c(2012,2020,2050,2050,2020,2010),y=c(0,.55,1,.17,0,0),col=grey.shade,border=NA)
# one line per forecast column, labeled at its final data point; the third
# column's label is nudged down (pos=1) to avoid overplotting a neighbor
for(column in 2:ncol(forecasts2)){
lines(x=na.omit(forecasts2[,c(1,column)])$Year,y=na.omit(forecasts2[,column]),lwd=2,lty=column,col='grey30')
if(column==3) {
text(x=tail(na.omit(forecasts2[,c(1,column)])$Year,n=1),y=tail(na.omit(forecasts2[,column]),n=1)-.03,labels=labels(forecasts2)[[2]][column],cex=.8,pos=1)
}
else {
text(x=tail(na.omit(forecasts2[,c(1,column)])$Year,n=1),y=tail(na.omit(forecasts2[,column]),n=1),labels=labels(forecasts2)[[2]][column],cex=.8,pos=4)
}
}
text(x=2040,y=.4,pos=4,labels='FORECASTS',font=2)
dev.off()
#####################################################################################################
#Chloropleth Maps
#import data
# Per-vehicle fee tables: one row per state, with total lifetime fees in
# "Total" and the breakdown in columns 3-7 (federal fuel tax, state fuel tax,
# registration, title, inspection -- per the pie legend drawn later).
camry <- read.csv("inputs/camry.csv")
civic <- read.csv("inputs/civic.csv")
f150 <- read.csv("inputs/f150.csv")
leaf <- read.csv("inputs/leaf.csv")
prius <- read.csv("inputs/prius.csv")
prius.phev <- read.csv("inputs/priusphev.csv")
volt <- read.csv("inputs/volt.csv")
#state names
# The maps package names sub-polygons "state:subregion"; strip everything from
# the colon so each polygon matches a fee-table row by plain state name.
mapnames <- map("state",plot=FALSE)$names
mapnames.state <- ifelse(regexpr(":",mapnames) < 0,mapnames,
substr(mapnames, 1, regexpr(":",mapnames)-1))
#convert states to lowercase
# lowercase to match the maps package's polygon naming
camry$State <- tolower(camry$State)
civic$State <- tolower(civic$State)
f150$State <- tolower(f150$State)
leaf$State <- tolower(leaf$State)
prius$State <- tolower(prius$State)
prius.phev$State <- tolower(prius.phev$State)
volt$State <- tolower(volt$State)
#chloropleth quantities
# Breakpoints (in $) for the five-color total-fee buckets used by every
# vehicle map below.
color.split <- c(1500,3000,4500,6000)
#vehicle fee choropleth maps
# The seven per-vehicle maps share one recipe, previously copy-pasted seven
# times; they are now generated by a single loop:
#   1. bucket each state's total fees into one of five blues via color.split,
#   2. draw the state choropleth filled by that bucket,
#   3. overlay a per-state pie of the fee breakdown (columns 3-7: federal fuel
#      tax, state fuel tax, registration, title, inspection),
#   4. copy the figure into the dissertation tree.
# The shared legend is produced separately as figures/map_legend.pdf.

# Map a vector of total fees to fill colors using the color.split breakpoints.
# left.open = TRUE reproduces the strict ">" comparisons of the original
# if/else chains (a value exactly on a breakpoint falls in the lower bucket).
fee.colors <- function(totals) {
  palette <- c("white","lightskyblue","dodgerblue","blue2","blue4")
  palette[findInterval(totals, color.split, left.open = TRUE) + 1]
}
#pie slice colors: federal tax, state tax, registration, title, inspection
raincol <- c("purple","red","darkgreen","black","yellow")
#data-frame name -> output file stem (note prius.phev's file has no dot)
fee.map.files <- c(camry = "camry", civic = "civic", f150 = "f150",
                   leaf = "leaf", prius = "prius", prius.phev = "priusphev",
                   volt = "volt")
for (veh in names(fee.map.files)) {
  dat <- get(veh)
  dat$color <- fee.colors(dat$Total)
  assign(veh, dat)   #keep the color column on the global data frame, as before
  stem <- fee.map.files[[veh]]
  pdf(paste0('figures/', stem, '_fees.pdf'))
  map("state", fill = TRUE, col = dat$color[match(mapnames.state, dat$State)])
  draw.pie(state.center$x, state.center$y, as.matrix(dat[, c(3,4,5,6,7)]),
           radius = .65, col = raincol, scale = FALSE)
  dev.off()
  file.copy(paste0('figures/', stem, '_fees.pdf'),
            paste0('~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/',
                   stem, '_fees.pdf'))
}
# Shared legend for all the fee maps, drawn on an empty canvas so it can be
# placed in the document independently of any single map.
pdf('figures/map_legend.pdf')
plot(x=0:50,y=0:50,xlab='',ylab='',type='n',xaxt='n',yaxt='n',bty='n')
legend(x=0,y=50,leg=c("Federal Fuel Tax","State Fuel Tax","Registration Fees","Title Fees","Inspection Fees"),
fill=c("purple","red","darkgreen","black","yellow"),cex=0.7,title="Fee Breakdown",box.lwd = 0,box.col = "white",bg = "white")
legend(x=15,y=50,leg=c('0-1499','1500-2999','3000-4499','4500-5999','>6000'),
fill=c("white","lightskyblue","dodgerblue","blue2","blue4"),title="Total Fees ($)",cex=.7,box.lwd = 0,box.col = "white",bg = "white",ncol=2)
dev.off()
file.copy('figures/map_legend.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/map_legend.pdf')
#####################################################################################################
#Differences map
# Per-state gap between F-150 and Leaf lifetime fee revenue, mapped in reds.
diff.f150.leaf <- data.frame('State'=f150$State,'Total'=f150$Total-leaf$Total)
color.split2 <- c(1000,2000,3000,4000)
# Vectorized bucket assignment (replaces the per-row if/else loop); left.open
# keeps the strict ">" bucket edges, white = smallest gap.
diff.palette <- c("white","pink1","indianred2","red1","red4")
diff.f150.leaf$color <- diff.palette[
  findInterval(diff.f150.leaf$Total, color.split2, left.open = TRUE) + 1]
pdf('figures/diff_fees.pdf')
map("state",fill=TRUE,col=diff.f150.leaf$color[match(mapnames.state,diff.f150.leaf$State)])
legend("bottomright",leg=c('0-999','1000-1999','2000-2999','3000-3999','>4000'),
fill=rev(c("red4","red1","indianred2","pink1","white")),title="Revenue Difference ($)",bty='n',cex=.7)
dev.off()
file.copy('figures/diff_fees.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/diff_fees.pdf')
#####################################################################################################
#####################################################################################################
#Prius distribution for appendix
#load state sale distribution data
# Draws of the Prius state-sales distribution, one column per state (rows are
# presumably bootstrap draws/periods -- confirm against the CSV).
state.percents <- read.csv('inputs/prius_statepercentages.csv')
#total and percent diff by state
state.percents.total <- read.csv('inputs/total_statepercentages.csv')
state.percents.diff <- read.csv('inputs/percentdiff_bystate.csv',header=FALSE)
#chloropleth map of prius state sales distribution
# Mean share of national Prius sales by state; columns of state.percents are
# assumed to be in the same state order as camry$State -- TODO confirm.
priussales <- data.frame("state"=camry$State,"percent"=colMeans(state.percents))
priussales.quantiles <- quantile(colMeans(state.percents),probs=c(.2,.4,.6,.8))
# Quintile color buckets; findInterval with left.open = TRUE reproduces the
# strict ">" edges of the original if/else chain.
priussales$color <- c("white","lightskyblue","dodgerblue","blue2","blue4")[
  findInterval(priussales$percent, priussales.quantiles, left.open = TRUE) + 1]
pdf('figures/priussales.pdf')
map("state",fill=TRUE,col=priussales$color[match(mapnames.state,priussales$state)])
# Legend labels are percentages; the first entry previously printed the raw
# fraction (missing the *100) -- all bounds are now scaled consistently.
legend("bottomright",leg=c(as.expression(paste(0," - ",signif(priussales.quantiles[1]*100,3))),
as.expression(paste(signif(priussales.quantiles[1]*100,3)," - ",signif(priussales.quantiles[2]*100,3))),
as.expression(paste(signif(priussales.quantiles[2]*100,3)," - ",signif(priussales.quantiles[3]*100,3))),
as.expression(paste(signif(priussales.quantiles[3]*100,3)," - ",signif(priussales.quantiles[4]*100,3))),
as.expression(paste(signif(priussales.quantiles[4]*100,3)," - ",signif(max(priussales$percent)*100,3)))),
fill=c("white","lightskyblue","dodgerblue","blue2","blue4"),title="Sales Distribution (%)",cex=.7)
dev.off()
file.copy('figures/priussales.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/priussales.pdf')
#chloropleth map of total state sales distribution
# Mean share of total national vehicle sales by state (same recipe as the
# Prius map; columns assumed ordered like camry$State -- TODO confirm).
totalsales <- data.frame("state"=camry$State,"percent"=colMeans(state.percents.total))
totalsales.quantiles <- quantile(colMeans(state.percents.total),probs=c(.2,.4,.6,.8))
# Quintile buckets; left.open keeps the strict ">" edges of the original loop.
totalsales$color <- c("white","lightskyblue","dodgerblue","blue2","blue4")[
  findInterval(totalsales$percent, totalsales.quantiles, left.open = TRUE) + 1]
pdf('figures/totalsales.pdf')
map("state",fill=TRUE,col=totalsales$color[match(mapnames.state,totalsales$state)])
# Legend in percent; the first entry previously printed the raw fraction
# (missing the *100) -- now scaled consistently with the other bounds.
legend("bottomright",leg=c(as.expression(paste(0," - ",signif(totalsales.quantiles[1]*100,3))),
as.expression(paste(signif(totalsales.quantiles[1]*100,3)," - ",signif(totalsales.quantiles[2]*100,3))),
as.expression(paste(signif(totalsales.quantiles[2]*100,3)," - ",signif(totalsales.quantiles[3]*100,3))),
as.expression(paste(signif(totalsales.quantiles[3]*100,3)," - ",signif(totalsales.quantiles[4]*100,3))),
as.expression(paste(signif(totalsales.quantiles[4]*100,3)," - ",signif(max(totalsales$percent)*100,3)))),
fill=c("white","lightskyblue","dodgerblue","blue2","blue4"),title="Sales Distribution (%)",cex=.7)
dev.off()
file.copy('figures/totalsales.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/totalsales.pdf')
#chloropleth map of difference in state sales distribution
# Per-state difference between the two sales distributions; the input is sign-
# flipped (*-1) before mapping -- confirm the intended sign convention.
diffsales <- data.frame("state"=camry$State,"percent"=state.percents.diff*-1)
colnames(diffsales) <- c("state","percent")
diffsales.quantiles <- quantile(diffsales$percent,probs=c(.2,.4,.6,.8))
# Diverging quintile buckets: red = most negative, lightskyblue = most
# positive; left.open keeps the strict ">" edges of the original loop.
diffsales$color <- c("red","lightcoral","lightpink","white","lightskyblue")[
  findInterval(diffsales$percent, diffsales.quantiles, left.open = TRUE) + 1]
pdf('figures/diffsales.pdf')
map("state",fill=TRUE,col=diffsales$color[match(mapnames.state,diffsales$state)])
# Legend in percent; the upper bound of the first entry previously printed the
# raw fraction (missing the *100) -- now scaled consistently.
legend("bottomright",leg=c(as.expression(paste(signif(min(diffsales$percent)*100,3)," - ",signif(diffsales.quantiles[1]*100,3))),
as.expression(paste(signif(diffsales.quantiles[1]*100,3)," - ",signif(diffsales.quantiles[2]*100,3))),
as.expression(paste(signif(diffsales.quantiles[2]*100,3)," - ",signif(diffsales.quantiles[3]*100,3))),
as.expression(paste(signif(diffsales.quantiles[3]*100,3)," - ",signif(diffsales.quantiles[4]*100,3))),
as.expression(paste(signif(diffsales.quantiles[4]*100,3)," - ",signif(max(diffsales$percent)*100,3)))),
fill=c("red","lightcoral","lightpink","white","lightskyblue"),title="Sales Distribution (%)",cex=.7)
dev.off()
file.copy('figures/diffsales.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/diffsales.pdf')
#####################################################################################################
#####################################################################################################
#EIA AEO by region
# Census-division names; each matches a file inputs/aeo_byregion/<name>.csv
# and becomes the name of a projection data frame created below.
region.list <- c('east.north.central','east.south.central','middle.atlantic','mountain','newengland','pacific','south.atlantic','west.north.central','west.south.central')
extract.2013proj <- function(inputtable) {
  # Parse one AEO-by-region CSV (read headerless) and pull annual sales for
  # four technologies over 2012-2025.
  # Layout (per the original indexing): year columns are every other column
  # starting at column 5 (columns 5,7,...,31 = 2012..2025); within each year
  # column, row 14 = FFV, row 15 = BEV-100, row 17 = PHEV-10, row 18 =
  # PHEV-40, reported in thousands of vehicles.
  #
  # Args:
  #   inputtable: path to the regional AEO CSV.
  # Returns:
  #   data.frame with columns year, BEV100, PHEV10, PHEV40, FFV (sales in
  #   whole vehicles, rounded).
  holdtable <- read.csv(inputtable, header = FALSE)
  year.cols <- seq(5, 31, by = 2)
  # Cells may arrive as character (or, under old R defaults, factor); go via
  # as.character so factor level codes are never misread as values. This
  # replaces the fragile as.numeric(levels(as.ordered(x))) idiom.
  cell <- function(row, col) as.numeric(as.character(holdtable[[col]][row]))
  # vapply with preallocated output replaces growing vectors via c() in a loop
  bev    <- vapply(year.cols, function(col) cell(15, col), numeric(1))
  phev10 <- vapply(year.cols, function(col) cell(17, col), numeric(1))
  phev40 <- vapply(year.cols, function(col) cell(18, col), numeric(1))
  ffv    <- vapply(year.cols, function(col) cell(14, col), numeric(1))
  data.frame('year' = 2012:2025,
             'BEV100' = round(bev * 1000, 0),
             'PHEV10' = round(phev10 * 1000, 0),
             'PHEV40' = round(phev40 * 1000, 0),
             'FFV'    = round(ffv * 1000, 0))
}
# Read each region's AEO projection into a data frame named after the region
# (e.g. `pacific`), via assign() so names line up with region.list.
for(i in 1:length(region.list)) {
assign(paste(region.list[i]),extract.2013proj(paste('inputs/aeo_byregion/',region.list[i],'.csv',sep='')))
}
#####################################################################################################
#####################################################################################################
#Projected sales by state
#Prius sales for proxy, matrix for drawing values
# Historical Prius sales by state, used as the proxy for allocating regional
# projections to states; the last row is dropped (presumably a totals row --
# confirm against the CSV).
prius.sales <- read.csv('inputs/priussales_bystate.csv',header=TRUE)
prius.sales <- prius.sales[-nrow(prius.sales),]
future.sales.bystate <- function() {
# Allocate the regional EIA 2013 projections (BEV100/PHEV10/PHEV40/FFV) down
# to the 50 states. Each row of the global prius.sales table is one draw of
# the Prius state-sales distribution, used as the proxy for how sales split
# across states WITHIN each census region.
# Depends on globals: eia.2013, prius.sales, region.list, and the per-region
# projection tables (pacific, mountain, ...) created by assign() above.
# Side effect: writes state.order (the state/column order of the outputs) to
# the global environment via <<-.
# Returns: list(BEV100, PHEV10, PHEV40, FFV); each element is an array with
# dim = c(projection years, 50 states, prius.sales draws).
#states in each region
pacific.states <- c('ALASKA','WASHINGTON','OREGON','CALIFORNIA','HAWAII')
mountain.states <- c('MONTANA','IDAHO','NEVADA','UTAH','WYOMING','COLORADO','ARIZONA','NEW.MEXICO')
west.north.central.states <- c('NORTH.DAKOTA','MINNESOTA','SOUTH.DAKOTA','NEBRASKA','KANSAS','IOWA','MISSOURI')
west.south.central.states <- c('TEXAS','OKLAHOMA','ARKANSAS','LOUISIANA')
east.north.central.states <- c('WISCONSIN','MICHIGAN','ILLINOIS','INDIANA','OHIO')
east.south.central.states <- c('KENTUCKY','TENNESSEE','MISSISSIPPI','ALABAMA')
south.atlantic.states <- c('WEST.VIRGINIA','MARYLAND','DELAWARE','VIRGINIA','NORTH.CAROLINA','SOUTH.CAROLINA','GEORGIA','FLORIDA')
middle.atlantic.states <- c('PENNSYLVANIA','NEW.YORK','NEW.JERSEY')
newengland.states <- c('MAINE','VERMONT','NEW.HAMPSHIRE','MASSACHUSETTS','RHODE.ISLAND','CONNECTICUT')
#creating empty arrays for final list output
BEV100.out <- array(dim=c(nrow(eia.2013),50,nrow(prius.sales)))
PHEV10.out <- array(dim=c(nrow(eia.2013),50,nrow(prius.sales)))
PHEV40.out <- array(dim=c(nrow(eia.2013),50,nrow(prius.sales)))
FFV.out <- array(dim=c(nrow(eia.2013),50,nrow(prius.sales)))
for(j in 1:nrow(prius.sales)) {
hold.sales <- prius.sales[j,]
#creating empty matrices to be filled, one for each row of prius sales
BEV100.bystate <- matrix(numeric(0),nrow=nrow(eia.2013))
PHEV10.bystate <- matrix(numeric(0),nrow=nrow(eia.2013))
PHEV40.bystate <- matrix(numeric(0),nrow=nrow(eia.2013))
FFV.bystate <- matrix(numeric(0),nrow=nrow(eia.2013))
for(i in region.list) {
#takes prius sales and keeps only states within the region
hold.statesales <- hold.sales[colnames(hold.sales)%in%get(paste(i,'.states',sep=''))]
#transformation of the sales to the proportional distribution in each region
hold.statetotal <- sum(hold.statesales)
hold.statedist <- hold.statesales/hold.statetotal
for(k in colnames(eia.2013[-1])) {
#gives table of sales by state for each technology
# outer product: (years x 1) regional projection %*% (1 x states) shares
assign(paste('hold.',k,'.bystate',sep=''),(as.matrix('[['(get(i),k),ncol=1)%*%as.matrix(hold.statedist,nrow=1)))
#appends the table to the matrix holding all other regions that have been run
assign(paste(k,'.bystate',sep=''),cbind(get(paste(k,'.bystate',sep='')),get(paste('hold.',k,'.bystate',sep=''))))
}
}
#assigns completed table (of each technology sales across 50 states) to the empty array
BEV100.out[,,j] <- BEV100.bystate
PHEV10.out[,,j] <- PHEV10.bystate
PHEV40.out[,,j] <- PHEV40.bystate
FFV.out[,,j] <- FFV.bystate
}
# export the column (state) ordering for downstream relabeling -- note the
# deliberate global assignment
state.order <<- colnames(BEV100.bystate)
out <- list(BEV100.out,PHEV10.out,PHEV40.out,FFV.out)
return(out)
}
projectedsales.bystate <- future.sales.bystate()
# normalize state names to the maps-package style ("NEW.MEXICO" -> "new mexico")
state.order <- tolower(state.order)
state.order <- gsub('\\.',' ',state.order)
# Summarize across the prius.sales draws (third array dimension) into
# year x state tables, created via assign() as mean.projected<TECH>.bystate,
# q25.projected<TECH>.bystate, q975.projected<TECH>.bystate.
# NOTE: despite the names, q25/q975 are the 2.5% and 97.5% quantiles.
for(i in 1:ncol(eia.2013[-1])) {
assign(paste('mean.projected',colnames(eia.2013[i+1]),'.bystate',sep=''),as.data.frame(apply(projectedsales.bystate[[i]],c(1,2),mean)))
assign(paste('q25.projected',colnames(eia.2013[i+1]),'.bystate',sep=''),apply(projectedsales.bystate[[i]],c(1,2),quantile,probs=.025))
assign(paste('q975.projected',colnames(eia.2013[i+1]),'.bystate',sep=''),apply(projectedsales.bystate[[i]],c(1,2),quantile,probs=.975))
}
# Fixed ordering relied on downstream: four means, four 2.5% tables, four
# 97.5% tables, each in BEV100/PHEV10/PHEV40/FFV order.
all.cases <- list(mean.projectedBEV100.bystate, mean.projectedPHEV10.bystate, mean.projectedPHEV40.bystate, mean.projectedFFV.bystate, q25.projectedBEV100.bystate, q25.projectedPHEV10.bystate, q25.projectedPHEV40.bystate, q25.projectedFFV.bystate, q975.projectedBEV100.bystate, q975.projectedPHEV10.bystate, q975.projectedPHEV40.bystate, q975.projectedFFV.bystate)
# Label a by-state table with the global state.order names and return it with
# columns sorted alphabetically by state.
name.it <- function(table) {
  renamed <- setNames(table, state.order)
  renamed[, order(names(renamed))]
}
# Apply the state labels/ordering to every summary table.
all.cases <- lapply(all.cases,name.it)
#####################################################################################################
#####################################################################################################
#Calculating projected revenue losses
future.revenue.decrease <- function(salesinput,bevrev,phev10rev,phev40rev,
                                    baserev = camry$Total) {
  # Convert projected sales tables into per-state revenue decreases relative
  # to a baseline gasoline vehicle.
  #
  # Args:
  #   salesinput: list of sales matrices (year x state) in repeating groups of
  #     four technologies: BEV, PHEV10, PHEV40, FFV. FFV entries are skipped
  #     (no revenue table is produced for them).
  #   bevrev, phev10rev, phev40rev: per-state fee revenue for each technology.
  #   baserev: per-state fee revenue of the baseline vehicle. New optional
  #     argument; defaults to the previously hard-coded global camry$Total,
  #     so existing callers are unaffected.
  # Returns:
  #   list of state x year revenue-loss matrices, one per non-FFV input, in
  #   the same order as salesinput with the FFV slots dropped.
  tech.rev <- list(bevrev, phev10rev, phev40rev)
  out <- list()
  for (i in seq_along(salesinput)) {
    slot <- i %% 4
    if (slot == 0) next  # every 4th table is FFV: skipped, as before
    # transpose to state x year, then scale each state's sales by that
    # state's per-vehicle revenue shortfall (vector recycles down columns)
    out[[length(out) + 1]] <- t(salesinput[[i]]) * (baserev - tech.rev[[slot]])
  }
  out
}
#list of revenue by year, state, and technology and distribution (row=state, column=year, list num=technology and dist: by 3's BEV, PHEV10, PHEV40, sets of 3 mean, q25, q975)
all.revenues.decrease <- future.revenue.decrease(all.cases,leaf$Total,prius.phev$Total,volt$Total)
#####################################################################################################
#####################################################################################################
#State level map (only needs state and technology)
#data reduction to state and technology (use only list 1-3, sum columns)
# Mean-case losses summed over all projection years, per state and technology.
total.state.revenueloss <- data.frame('bev.loss'=rowSums(all.revenues.decrease[[1]]),'phev10.loss'=rowSums(all.revenues.decrease[[2]]),'phev40.loss'=rowSums(all.revenues.decrease[[3]]))
total.state.revenueloss$totalloss <- rowSums(total.state.revenueloss)
total.state.revenueloss$state <- rownames(total.state.revenueloss)
#assigning colors by quantiles of loss
# Vectorized quintile buckets (replaces the per-row if/else loop); left.open
# keeps the strict ">" edges, white = lowest losses.
loss.quantiles <- quantile(total.state.revenueloss$totalloss,probs=c(.2,.4,.6,.8))
loss.palette <- c("white","pink1","indianred2","red1","red4")
total.state.revenueloss$color <- loss.palette[
  findInterval(total.state.revenueloss$totalloss, loss.quantiles, left.open = TRUE) + 1]
pdf('figures/revenue_loss_bystate.pdf')
map("state",fill=TRUE,col=total.state.revenueloss$color[match(mapnames.state,total.state.revenueloss$state)])
legend("bottomright",leg=c(as.expression(paste("< ",abs(signif(loss.quantiles[1]/10^6,2)))),
as.expression(paste(abs(signif(loss.quantiles[1]/10^6,2))," - ",abs(signif(loss.quantiles[2]/10^6,2)))),
as.expression(paste(abs(signif(loss.quantiles[2]/10^6,2))," - ",abs(signif(loss.quantiles[3]/10^6,2)))),
as.expression(paste(abs(signif(loss.quantiles[3]/10^6,2))," - ",abs(signif(loss.quantiles[4]/10^6,2)))),
as.expression(paste(abs(signif(loss.quantiles[4]/10^6,2))," - ",abs(signif(max(total.state.revenueloss$totalloss)/10^6,2))))),
fill=rev(c("red4","red1","indianred2","pink1","white")),title="Revenue Loss \n(millions of $)",bty='n',cex=.7)
dev.off()
file.copy('figures/revenue_loss_bystate.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/revenue_loss_bystate.pdf')
#####################################################################################################
#Normalized losses (to 2012 population)
#reading in data on 2012 population
# 2012 population by state; rows assumed to be in the same state order as
# total.state.revenueloss -- TODO confirm against the CSV.
population.bystate.2012 <- read.csv('inputs/2012_population.csv',header=TRUE)
total.state.revenueloss$normalizedloss <- total.state.revenueloss$totalloss/population.bystate.2012$Population
#assigning colors by quantiles of loss
# Vectorized quintile buckets (replaces the per-row if/else loop); left.open
# keeps the strict ">" edges, white = lowest per-person losses.
normalizedloss.quantiles <- quantile(total.state.revenueloss$normalizedloss,probs=c(.2,.4,.6,.8))
normalizedloss.palette <- c("white","pink1","indianred2","red1","red4")
total.state.revenueloss$color2 <- normalizedloss.palette[
  findInterval(total.state.revenueloss$normalizedloss, normalizedloss.quantiles, left.open = TRUE) + 1]
pdf('figures/revenue_loss_bystate_normalized.pdf')
map("state",fill=TRUE,col=total.state.revenueloss$color2[match(mapnames.state,total.state.revenueloss$state)])
legend("bottomright",leg=c(as.expression(paste("< ",abs(signif(normalizedloss.quantiles[1],2)))),
as.expression(paste(abs(signif(normalizedloss.quantiles[1],2))," - ",abs(signif(normalizedloss.quantiles[2],2)))),
as.expression(paste(abs(signif(normalizedloss.quantiles[2],2))," - ",abs(signif(normalizedloss.quantiles[3],2)))),
as.expression(paste(abs(signif(normalizedloss.quantiles[3],2))," - ",abs(signif(normalizedloss.quantiles[4],2)))),
as.expression(paste(abs(signif(normalizedloss.quantiles[4],2))," - ",abs(signif(max(total.state.revenueloss$normalizedloss),2))))),
fill=rev(c("red4","red1","indianred2","pink1","white")),title="Per Person \nRevenue Loss ($)",bty='n',cex=.7)
dev.off()
file.copy('figures/revenue_loss_bystate_normalized.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/revenue_loss_bystate_normalized.pdf')
#####################################################################################################
#####################################################################################################
#Calculating revenue loss over time
# Losses summed over states for each projection year: mean, 2.5% and 97.5%
# cases for each of the three technologies (matrix columns are years).
cumulative.loss.byyear <- data.frame('mean.loss.bev'=colSums(all.revenues.decrease[[1]]),'mean.loss.phev10'=colSums(all.revenues.decrease[[2]]),'mean.loss.phev40'=colSums(all.revenues.decrease[[3]]),'q25.loss.bev'=colSums(all.revenues.decrease[[4]]),'q25.loss.phev10'=colSums(all.revenues.decrease[[5]]),'q25.loss.phev40'=colSums(all.revenues.decrease[[6]]),'q975.loss.bev'=colSums(all.revenues.decrease[[7]]),'q975.loss.phev10'=colSums(all.revenues.decrease[[8]]),'q975.loss.phev40'=colSums(all.revenues.decrease[[9]]))
#distributing losses evenly
stagger.add <- function(input, lifetime=12) {
  # Spread each year's total evenly over the vehicle lifetime: year i's value
  # contributes input[i]/lifetime to each of years i, i+1, ..., i+lifetime-1
  # (equivalent to convolving input/lifetime with rep(1, lifetime)).
  #
  # Args:
  #   input: numeric vector of per-year totals.
  #   lifetime: number of years each total is spread across (default 12).
  # Returns:
  #   numeric vector of length length(input) + lifetime - 1, same total sum.
  annual <- input / lifetime
  # Preallocate the output instead of repeatedly growing/shifting with c(),
  # as the original did; results are identical.
  spread <- numeric(length(annual) + lifetime - 1)
  for (offset in 0:(lifetime - 1)) {
    idx <- seq_along(annual) + offset
    spread[idx] <- spread[idx] + annual
  }
  spread
}
# Spread each purchase-year loss over the 12-year vehicle lifetime, then total
# across the three technologies for the mean / 2.5% / 97.5% cases.
annualized.totalloss <- lapply(cumulative.loss.byyear,stagger.add)
annualized.totalloss.all <- data.frame('mean.total'=annualized.totalloss[[1]]+annualized.totalloss[[2]]+annualized.totalloss[[3]],'q25.total'=annualized.totalloss[[4]]+annualized.totalloss[[5]]+annualized.totalloss[[6]],'q975.total'=annualized.totalloss[[7]]+annualized.totalloss[[8]]+annualized.totalloss[[9]])
#plotting total revenue decreases
pdf('figures/annual_revenueloss.pdf')
par(mar=c(6,4,2,2))
plot(2012:2025,annualized.totalloss[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss (millions of $)')
grid()
axis(1,las=0,tck=.02)
axis(2,las=2,tck=.02)
lines(2012:2025,annualized.totalloss.all[1:14,1]/10^6,lwd=2)
lines(2012:2025,annualized.totalloss.all[1:14,2]/10^6,lwd=1.5,lty=2)
lines(2012:2025,annualized.totalloss.all[1:14,3]/10^6,lwd=1.5,lty=2)
for(i in 1:length(annualized.totalloss)) {
if(1<=i&i<=3) {
line.type <- 1
if(i%%3==1) {
color.assignment <- 'red'
lines(2012:2025,annualized.totalloss[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=2)
}
else if(i%%3==2) {
color.assignment <- 'blue'
lines(2012:2025,annualized.totalloss[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=2)
}
else {
color.assignment <- 'forestgreen'
lines(2012:2025,annualized.totalloss[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=2)
}
}
else {
line.type <- 2
if(i%%3==1) {
color.assignment <- 'red'
lines(2012:2025,annualized.totalloss[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=1.5)
}
else if(i%%3==2) {
color.assignment <- 'blue'
lines(2012:2025,annualized.totalloss[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=1.5)
}
else {
color.assignment <- 'forestgreen'
lines(2012:2025,annualized.totalloss[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=1.5)
}
}
}
legend("topleft",c('Total','BEV','PHEV-10','PHEV-40'),lty=c(1,1,1,1),col=c('black','red','blue','forestgreen'),bg='white')
dev.off()
file.copy('figures/annual_revenueloss.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/annual_revenueloss.pdf')
#for 2x2 grid
pdf('figures/annual_revenueloss_total.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss.all[1:14,1]/10^6,lwd=2)
lines(2012:2025,annualized.totalloss.all[1:14,2]/10^6,lwd=1.5,lty=2)
lines(2012:2025,annualized.totalloss.all[1:14,3]/10^6,lwd=1.5,lty=2)
dev.off()
file.copy('figures/annual_revenueloss_total.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/annual_revenueloss_total.pdf')
pdf('figures/annual_revenueloss_bev.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss[[1]][1:14]/10^6,lwd=2,col='red')
lines(2012:2025,annualized.totalloss[[4]][1:14]/10^6,lwd=1.5,lty=2,col='red')
lines(2012:2025,annualized.totalloss[[7]][1:14]/10^6,lwd=1.5,lty=2,col='red')
dev.off()
file.copy('figures/annual_revenueloss_bev.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/annual_revenueloss_bev.pdf')
pdf('figures/annual_revenueloss_phev10.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss[[2]][1:14]/10^6,lwd=2,col='blue')
lines(2012:2025,annualized.totalloss[[5]][1:14]/10^6,lwd=1.5,lty=2,col='blue')
lines(2012:2025,annualized.totalloss[[8]][1:14]/10^6,lwd=1.5,lty=2,col='blue')
dev.off()
file.copy('figures/annual_revenueloss_phev10.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/annual_revenueloss_phev10.pdf')
pdf('figures/annual_revenueloss_phev40.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,cex.axis=1.5)
axis(2,las=2,cex.axis=1.5)
lines(2012:2025,annualized.totalloss[[3]][1:14]/10^6,lwd=2,col='forestgreen')
lines(2012:2025,annualized.totalloss[[6]][1:14]/10^6,lwd=1.5,lty=2,col='forestgreen')
lines(2012:2025,annualized.totalloss[[9]][1:14]/10^6,lwd=1.5,lty=2,col='forestgreen')
dev.off()
file.copy('figures/annual_revenueloss_phev40.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/annual_revenueloss_phev40.pdf')
#####################################################################################################
#####################################################################################################
#Sensitivity Analysis
#function to generate revenue from alternative policy: registration fee
# Alternative policy 1: a flat annual registration fee set as a percentage of
# MSRP, collected over the vehicle lifetime.
#
# input:      per-vehicle revenue table; columns 3:7 are the revenue
#             components summed into Total (positional, as elsewhere here)
# msrp:       vehicle sticker price ($)
# percentage: annual fee as a fraction of MSRP
# lifetime:   years the fee is collected
# returns:    copy of `input` with Registration.Fees replaced and Total recomputed
alt1 <- function(input, msrp, percentage, lifetime) {
  revised <- input
  lifetime.fee <- msrp * percentage * lifetime
  revised$Registration.Fees <- lifetime.fee
  revised$Total <- rowSums(revised[, 3:7])
  revised
}
# Apply the registration-fee policy (1% of MSRP/yr for 12 yr) to each vehicle.
alt1.leaf <- alt1(leaf,21300,.01,12)
alt1.volt <- alt1(volt,31645,.01,12)
alt1.priusphev <- alt1(prius.phev,32000,.01,12)
#function to generate revenue from alternative policy: use fee
# Alternative policy 2: a per-mile use fee on electric-mode driving, added to
# the state fuel tax line.
#
# input:                per-vehicle revenue table; columns 3:7 feed Total
# electricmode.percent: fraction of miles driven on electricity
# vmt:                  annual vehicle-miles traveled
# lifetime:             years of operation
# taxrate:              fee in $ per electric mile
# returns:              copy of `input` with State.Fuel.Tax raised and Total recomputed
alt2 <- function(input, electricmode.percent, vmt, lifetime, taxrate) {
  revised <- input
  lifetime.fee <- lifetime * vmt * electricmode.percent * taxrate
  revised$State.Fuel.Tax <- revised$State.Fuel.Tax + lifetime.fee
  revised$Total <- rowSums(revised[, 3:7])
  revised
}
# Apply the use-fee policy (1 cent/mi, 12k mi/yr, 12 yr) — electric-mode shares:
# Leaf 100%, Volt 64%, Prius PHEV 28.8%.
alt2.leaf <- alt2(leaf,1,12000,12,.01)
alt2.volt <- alt2(volt,.64,12000,12,.01)
alt2.priusphev <- alt2(prius.phev,.288,12000,12,.01)
#sensitivity on registration fee
# Cumulative revenue change under a registration fee of `input` (fraction of
# MSRP per year).  Relies on globals: all.cases, leaf, prius.phev, volt, and
# future.revenue.decrease (defined earlier in this file).
# Returns a 1x3 matrix: mean total and the two scenario bounds.
sensitivity1 <- function(input) {
  fee.rev <- future.revenue.decrease(all.cases,
    alt1(leaf, 21300, input, 12)$Total,
    alt1(prius.phev, 32000, input, 12)$Total,
    alt1(volt, 31645, input, 12)$Total)
  summary.row <- data.frame(
    'mean.totalrev' = sum(fee.rev[[1]], fee.rev[[2]], fee.rev[[3]]),
    'q25.totalrev' = sum(fee.rev[[4]], fee.rev[[5]], fee.rev[[6]]),
    'q975.totalrev' = sum(fee.rev[[7]], fee.rev[[8]], fee.rev[[9]]))
  as.matrix(summary.row)
}
# Sweep the fee from 0% to 1% of MSRP; sapply yields a 3x11 matrix
# (rows: mean / lower / upper; columns: fee levels).
alt1.sensitivity <- sapply(seq(0,.01,by=.001),sensitivity1)
#sensitivity on use fee
# Cumulative revenue change under a use fee of `input` dollars per electric
# mile.  Same global dependencies and return shape as sensitivity1.
sensitivity2 <- function(input) {
  fee.rev <- future.revenue.decrease(all.cases,
    alt2(leaf, 1, 12000, 12, input)$Total,
    alt2(prius.phev, .288, 12000, 12, input)$Total,
    alt2(volt, .64, 12000, 12, input)$Total)
  summary.row <- data.frame(
    'mean.totalrev' = sum(fee.rev[[1]], fee.rev[[2]], fee.rev[[3]]),
    'q25.totalrev' = sum(fee.rev[[4]], fee.rev[[5]], fee.rev[[6]]),
    'q975.totalrev' = sum(fee.rev[[7]], fee.rev[[8]], fee.rev[[9]]))
  as.matrix(summary.row)
}
# Sweep the use fee from 0 to 10 cents/mi (3x11 matrix, like alt1.sensitivity).
alt2.sensitivity <- sapply(seq(0,.1,by=.01),sensitivity2)
#plot of sensitivity on registration fee
pdf('figures/registrationfee_sensitivity.pdf')
par(mar=c(8,8,1,1)+0.1)
plot(seq(0,.01,by=.001)*100,alt1.sensitivity[1,]/10^9,type="n",lwd=2.5,xlab="",ylab="",ylim=c(min(alt1.sensitivity/10^9),max(alt1.sensitivity/10^9)),xaxt='n',yaxt='n')
axis(1,cex.axis=1,las=0,tck=.02,cex.axis=2)
axis(2,cex.axis=1,las=2,tck=.02,cex.axis=2)
mtext('Annual Registration Fee \n Percentage of MSRP',side=1,line=5,cex=2)
mtext('Cumulative Revenue Decrease from \nEVs by 2025 (billions of dollars)',side=2,line=4,cex=2)
grid()
abline(h=0,col='red')
lines(seq(0,.01,by=.001)*100,alt1.sensitivity[1,]/10^9,lwd=2.5)
# NOTE(review): the dashed bound lines connect row 3 (upper) at fee=0 to
# row 2 (lower) at the max fee, and vice versa — presumably intentional
# because the bounds swap sign as the fee overtakes the loss; confirm.
lines(c(0,0.01)*100,c(alt1.sensitivity[3,1],alt1.sensitivity[2,11])/10^9,lwd=2.5,lty=2)
lines(c(0,0.01)*100,c(alt1.sensitivity[2,1],alt1.sensitivity[3,11])/10^9,lwd=2.5,lty=2)
dev.off()
file.copy('figures/registrationfee_sensitivity.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/registrationfee_sensitivity.pdf')
#plot of sensitivity on use fee
pdf('figures/usefee_sensitivity.pdf')
par(mar=c(6,8,1,1)+0.1)
plot(seq(0,.1,by=.01)*100,alt2.sensitivity[1,]/10^9,type="n",lwd=2.5,xlab="",ylab="",ylim=c(min(alt2.sensitivity/10^9),max(alt2.sensitivity/10^9)),xaxt='n',yaxt='n')
axis(1,cex.axis=1,las=0,tck=.02,cex.axis=2)
axis(2,cex.axis=1,las=2,tck=.02,cex.axis=2)
mtext('Use Fee Tax (cents per mile)',side=1,line=3,cex=2)
mtext('Cumulative Revenue Decrease from \nEVs by 2025 (billions of dollars)',side=2,line=4,cex=2)
grid()
abline(h=0,col='red')
lines(seq(0,.1,by=.01)*100,alt2.sensitivity[1,]/10^9,lwd=2.5)
# Same crossed-bound construction as the registration-fee plot above.
lines(c(0,0.1)*100,c(alt2.sensitivity[3,1]/10^9,alt2.sensitivity[2,11]/10^9),lwd=2.5,lty=2)
lines(c(0,0.1)*100,c(alt2.sensitivity[2,1]/10^9,alt2.sensitivity[3,11]/10^9),lwd=2.5,lty=2)
dev.off()
file.copy('figures/usefee_sensitivity.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/usefee_sensitivity.pdf')
###
#upper-bound analysis
###
# Rescale the AEO2013 sales table to the EPRI/NRDC 2007 forecast (upper bound).
# NOTE(review): these rescaled columns appear to be used below only for their
# dimensions/column names (future.sales.bystate.upper pulls sales from the
# regional tables via get()), and the same EPRI/AEO ratio is applied again at
# the projectedsales.bystate.upper loop — verify this is not double-scaling.
epri.nrdc.2007 <- eia.2013
epri.nrdc.2007$BEV100 <- epri.nrdc.2007$BEV*forecasts2$EPRI.NRDC2007[5:18]/forecasts2$AEO2013[5:18]
epri.nrdc.2007$PHEV10 <- epri.nrdc.2007$PHEV10*forecasts2$EPRI.NRDC2007[5:18]/forecasts2$AEO2013[5:18]
epri.nrdc.2007$PHEV40 <- epri.nrdc.2007$PHEV40*forecasts2$EPRI.NRDC2007[5:18]/forecasts2$AEO2013[5:18]
epri.nrdc.2007$FFV <- epri.nrdc.2007$FFV*forecasts2$EPRI.NRDC2007[5:18]/forecasts2$AEO2013[5:18]
# Distribute regional upper-bound EV sales projections down to the 50 states,
# using each state's share of Prius sales within its census region as the
# allocation weight.  Reads globals: epri.nrdc.2007 (for dims/column names),
# prius.sales (one row per draw, one column per state), region.list, and the
# per-region tables (pacific, mountain, ...) fetched via get().
# Side effect: assigns `state.order` in the global environment via <<-.
# Returns a list of four arrays (BEV100, PHEV10, PHEV40, FFV), each
# year x state x draw.
future.sales.bystate.upper <- function() {
#states in each region
pacific.states <- c('ALASKA','WASHINGTON','OREGON','CALIFORNIA','HAWAII')
mountain.states <- c('MONTANA','IDAHO','NEVADA','UTAH','WYOMING','COLORADO','ARIZONA','NEW.MEXICO')
west.north.central.states <- c('NORTH.DAKOTA','MINNESOTA','SOUTH.DAKOTA','NEBRASKA','KANSAS','IOWA','MISSOURI')
west.south.central.states <- c('TEXAS','OKLAHOMA','ARKANSAS','LOUISIANA')
east.north.central.states <- c('WISCONSIN','MICHIGAN','ILLINOIS','INDIANA','OHIO')
east.south.central.states <- c('KENTUCKY','TENNESSEE','MISSISSIPPI','ALABAMA')
south.atlantic.states <- c('WEST.VIRGINIA','MARYLAND','DELAWARE','VIRGINIA','NORTH.CAROLINA','SOUTH.CAROLINA','GEORGIA','FLORIDA')
middle.atlantic.states <- c('PENNSYLVANIA','NEW.YORK','NEW.JERSEY')
newengland.states <- c('MAINE','VERMONT','NEW.HAMPSHIRE','MASSACHUSETTS','RHODE.ISLAND','CONNECTICUT')
#creating empty arrays for final list output
BEV100.out <- array(dim=c(nrow(epri.nrdc.2007),50,nrow(prius.sales)))
PHEV10.out <- array(dim=c(nrow(epri.nrdc.2007),50,nrow(prius.sales)))
PHEV40.out <- array(dim=c(nrow(epri.nrdc.2007),50,nrow(prius.sales)))
FFV.out <- array(dim=c(nrow(epri.nrdc.2007),50,nrow(prius.sales)))
# Outer loop: one pass per Prius-sales draw j.
for(j in 1:nrow(prius.sales)) {
hold.sales <- prius.sales[j,]
#creating empty matrices to be filled, one for each row of prius sales
BEV100.bystate <- matrix(numeric(0),nrow=nrow(epri.nrdc.2007))
PHEV10.bystate <- matrix(numeric(0),nrow=nrow(epri.nrdc.2007))
PHEV40.bystate <- matrix(numeric(0),nrow=nrow(epri.nrdc.2007))
FFV.bystate <- matrix(numeric(0),nrow=nrow(epri.nrdc.2007))
for(i in region.list) {
#takes prius sales and keeps only states within the region
hold.statesales <- hold.sales[colnames(hold.sales)%in%get(paste(i,'.states',sep=''))]
#transformation of the sales to the proportional distribution in each region
hold.statetotal <- sum(hold.statesales)
hold.statedist <- hold.statesales/hold.statetotal
# Metaprogramming inner loop: for each technology column k, outer-product the
# region's annual sales ('[['(get(i),k), i.e. get(i)[[k]]) with the state
# shares, then cbind onto the accumulator via assign()/get().
for(k in colnames(epri.nrdc.2007[-1])) {
#gives table of sales by state for each technology
assign(paste('hold.',k,'.bystate',sep=''),(as.matrix('[['(get(i),k),ncol=1)%*%as.matrix(hold.statedist,nrow=1)))
#appends the table to the matrix holding all other regions that have been run
assign(paste(k,'.bystate',sep=''),cbind(get(paste(k,'.bystate',sep='')),get(paste('hold.',k,'.bystate',sep=''))))
}
}
#assigns completed table (of each technology sales across 50 states) to the empty array
BEV100.out[,,j] <- BEV100.bystate
PHEV10.out[,,j] <- PHEV10.bystate
PHEV40.out[,,j] <- PHEV40.bystate
FFV.out[,,j] <- FFV.bystate
}
# Global side effect: column order of the last draw becomes the canonical
# state ordering used downstream (identical across draws by construction).
state.order <<- colnames(BEV100.bystate)
out <- list(BEV100.out,PHEV10.out,PHEV40.out,FFV.out)
return(out)
}
projectedsales.bystate.upper <- future.sales.bystate.upper()
# Scale every state/draw column by the EPRI-to-AEO forecast ratio.  Note the
# inner for loops are brace-less single statements — fragile if edited.
# NOTE(review): see the epri.nrdc.2007 rescaling above; confirm the ratio is
# not being applied twice.
for(i in 1:length(projectedsales.bystate.upper)){
for(j in 1:dim(projectedsales.bystate.upper[[i]])[3])
for(k in 1:ncol(projectedsales.bystate.upper[[i]][,,j]))
projectedsales.bystate.upper[[i]][,k,j] <- projectedsales.bystate.upper[[i]][,k,j]*forecasts2$EPRI.NRDC2007[5:18]/forecasts2$AEO2013[5:18]
}
# Normalize state names ('NEW.MEXICO' -> 'new mexico') to match the maps package.
state.order <- tolower(state.order)
state.order <- gsub('\\.',' ',state.order)
# Collapse the draw dimension: mean and 2.5%/97.5% quantiles per technology,
# assigned to mean./q25./q975.projected<TECH>.bystate.upper via assign().
for(i in 1:ncol(epri.nrdc.2007[-1])) {
assign(paste('mean.projected',colnames(epri.nrdc.2007[i+1]),'.bystate.upper',sep=''),as.data.frame(apply(projectedsales.bystate.upper[[i]],c(1,2),mean)))
assign(paste('q25.projected',colnames(epri.nrdc.2007[i+1]),'.bystate.upper',sep=''),apply(projectedsales.bystate.upper[[i]],c(1,2),quantile,probs=.025))
assign(paste('q975.projected',colnames(epri.nrdc.2007[i+1]),'.bystate.upper',sep=''),apply(projectedsales.bystate.upper[[i]],c(1,2),quantile,probs=.975))
}
all.cases.upper <- list(mean.projectedBEV100.bystate.upper, mean.projectedPHEV10.bystate.upper, mean.projectedPHEV40.bystate.upper, mean.projectedFFV.bystate.upper, q25.projectedBEV100.bystate.upper, q25.projectedPHEV10.bystate.upper, q25.projectedPHEV40.bystate.upper, q25.projectedFFV.bystate.upper, q975.projectedBEV100.bystate.upper, q975.projectedPHEV10.bystate.upper, q975.projectedPHEV40.bystate.upper, q975.projectedFFV.bystate.upper)
# Label a projection table's columns with the global `state.order` vector,
# then reorder the columns alphabetically by state name.
name.it <- function(table) {
  colnames(table) <- state.order
  alpha <- order(colnames(table))
  table[, alpha]
}
all.cases.upper <- lapply(all.cases.upper,name.it)
#####################################################################################################
#####################################################################################################
#Calculating projected revenue losses
#list of revenue by year, state, and technology and distribution (row=state, column=year, list num=technology and dist: by 3's BEV, PHEV10, PHEV40, sets of 3 mean, q25, q975)
all.revenues.decrease.upper <- future.revenue.decrease(all.cases.upper,leaf$Total,prius.phev$Total,volt$Total)
#####################################################################################################
#####################################################################################################
#State level map (only needs state and technology)
#data reduction to state and technology (use only list 1-3, sum columns)
total.state.revenueloss.upper <- data.frame('bev.loss'=rowSums(all.revenues.decrease.upper[[1]]),'phev10.loss'=rowSums(all.revenues.decrease.upper[[2]]),'phev40.loss'=rowSums(all.revenues.decrease.upper[[3]]))
total.state.revenueloss.upper$totalloss <- rowSums(total.state.revenueloss.upper)
total.state.revenueloss.upper$state <- rownames(total.state.revenueloss.upper)
#assigning colors by quantiles of loss
# Quintile breaks; darker reds mark larger total losses.
loss.quantiles.upper <- quantile(total.state.revenueloss.upper$totalloss ,probs=c(.2,.4,.6,.8))
for(i in 1:nrow(total.state.revenueloss.upper)) {
if(total.state.revenueloss.upper$totalloss [i]>loss.quantiles.upper[4]) {
total.state.revenueloss.upper$color[i] <- "red4"
}
else if(total.state.revenueloss.upper$totalloss [i]>loss.quantiles.upper[3]) {
total.state.revenueloss.upper$color[i] <- "red1"
}
else if(total.state.revenueloss.upper$totalloss [i]>loss.quantiles.upper[2]) {
total.state.revenueloss.upper$color[i] <- "indianred2"
}
else if(total.state.revenueloss.upper$totalloss [i]>loss.quantiles.upper[1]) {
total.state.revenueloss.upper$color[i] <- "pink1"
}
else {
total.state.revenueloss.upper$color[i] <- "white"
}
}
# Choropleth of upper-bound losses; mapnames.state (defined earlier in the
# file) aligns map polygon order with the data rows.
pdf('figures/upperbound_revenue_loss_bystate.pdf')
map("state",fill=TRUE,col=total.state.revenueloss.upper$color[match(mapnames.state,total.state.revenueloss.upper$state)])
legend("bottomright",leg=c(as.expression(paste("< ",abs(signif(loss.quantiles.upper[1]/10^6,2)))),
as.expression(paste(abs(signif(loss.quantiles.upper[1]/10^6,2))," - ",abs(signif(loss.quantiles.upper[2]/10^6,2)))),
as.expression(paste(abs(signif(loss.quantiles.upper[2]/10^6,2))," - ",abs(signif(loss.quantiles.upper[3]/10^6,2)))),
as.expression(paste(abs(signif(loss.quantiles.upper[3]/10^6,2))," - ",abs(signif(loss.quantiles.upper[4]/10^6,2)))),
as.expression(paste(abs(signif(loss.quantiles.upper[4]/10^6,2))," - ",abs(signif(max(total.state.revenueloss.upper$totalloss)/10^6,2))))),
fill=rev(c("red4","red1","indianred2","pink1","white")),title="Revenue Loss \n(millions of $)",bty='n',cex=.7)
dev.off()
#####################################################################################################
#####################################################################################################
#Calculating revenue loss over time
# Same reduction as the base case above, applied to the upper-bound list.
cumulative.loss.byyear.upper <- data.frame('mean.loss.bev'=colSums(all.revenues.decrease.upper[[1]]),'mean.loss.phev10'=colSums(all.revenues.decrease.upper[[2]]),'mean.loss.phev40'=colSums(all.revenues.decrease.upper[[3]]),'q25.loss.bev'=colSums(all.revenues.decrease.upper[[4]]),'q25.loss.phev10'=colSums(all.revenues.decrease.upper[[5]]),'q25.loss.phev40'=colSums(all.revenues.decrease.upper[[6]]),'q975.loss.bev'=colSums(all.revenues.decrease.upper[[7]]),'q975.loss.phev10'=colSums(all.revenues.decrease.upper[[8]]),'q975.loss.phev40'=colSums(all.revenues.decrease.upper[[9]]))
annualized.totalloss.upper <- lapply(cumulative.loss.byyear.upper,stagger.add)
annualized.totalloss.all.upper <- data.frame('mean.total'=annualized.totalloss.upper[[1]]+annualized.totalloss.upper[[2]]+annualized.totalloss.upper[[3]],'q25.total'=annualized.totalloss.upper[[4]]+annualized.totalloss.upper[[5]]+annualized.totalloss.upper[[6]],'q975.total'=annualized.totalloss.upper[[7]]+annualized.totalloss.upper[[8]]+annualized.totalloss.upper[[9]])
#plotting total revenue decreases
# Mirrors the base-case annual-loss plot: black = total (solid mean, dashed
# bounds); colored lines per technology via the i %% 3 mapping
# (1 -> BEV red, 2 -> PHEV-10 blue, 0 -> PHEV-40 forestgreen).
pdf('figures/upperbound_annual_revenueloss.pdf')
par(mar=c(6,4,2,2))
plot(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all.upper)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss (millions of $)')
grid()
axis(1,las=0,tck=.02)
axis(2,las=2,tck=.02)
lines(2012:2025,annualized.totalloss.all.upper[1:14,1]/10^6,lwd=2)
lines(2012:2025,annualized.totalloss.all.upper[1:14,2]/10^6,lwd=1.5,lty=2)
lines(2012:2025,annualized.totalloss.all.upper[1:14,3]/10^6,lwd=1.5,lty=2)
for(i in 1:length(annualized.totalloss.upper)) {
if(1<=i&i<=3) {
line.type <- 1
if(i%%3==1) {
color.assignment <- 'red'
lines(2012:2025,annualized.totalloss.upper[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=2)
}
else if(i%%3==2) {
color.assignment <- 'blue'
lines(2012:2025,annualized.totalloss.upper[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=2)
}
else {
color.assignment <- 'forestgreen'
lines(2012:2025,annualized.totalloss.upper[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=2)
}
}
else {
line.type <- 2
if(i%%3==1) {
color.assignment <- 'red'
lines(2012:2025,annualized.totalloss.upper[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=1.5)
}
else if(i%%3==2) {
color.assignment <- 'blue'
lines(2012:2025,annualized.totalloss.upper[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=1.5)
}
else {
color.assignment <- 'forestgreen'
lines(2012:2025,annualized.totalloss.upper[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=1.5)
}
}
}
legend("topleft",c('Total','BEV','PHEV-10','PHEV-40'),lty=c(1,1,1,1),col=c('black','red','blue','forestgreen'),bg='white')
dev.off()
#for 2x2 grid
# Upper-bound versions of the four standalone panels (total/BEV/PHEV-10/PHEV-40).
pdf('figures/upperbound_annual_revenueloss_total.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all.upper)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss.all.upper[1:14,1]/10^6,lwd=2)
lines(2012:2025,annualized.totalloss.all.upper[1:14,2]/10^6,lwd=1.5,lty=2)
lines(2012:2025,annualized.totalloss.all.upper[1:14,3]/10^6,lwd=1.5,lty=2)
dev.off()
pdf('figures/upperbound_annual_revenueloss_bev.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all.upper)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^6,lwd=2,col='red')
lines(2012:2025,annualized.totalloss.upper[[4]][1:14]/10^6,lwd=1.5,lty=2,col='red')
lines(2012:2025,annualized.totalloss.upper[[7]][1:14]/10^6,lwd=1.5,lty=2,col='red')
dev.off()
pdf('figures/upperbound_annual_revenueloss_phev10.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all.upper)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss.upper[[2]][1:14]/10^6,lwd=2,col='blue')
lines(2012:2025,annualized.totalloss.upper[[5]][1:14]/10^6,lwd=1.5,lty=2,col='blue')
lines(2012:2025,annualized.totalloss.upper[[8]][1:14]/10^6,lwd=1.5,lty=2,col='blue')
dev.off()
pdf('figures/upperbound_annual_revenueloss_phev40.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all.upper)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,cex.axis=1.5)
axis(2,las=2,cex.axis=1.5)
lines(2012:2025,annualized.totalloss.upper[[3]][1:14]/10^6,lwd=2,col='forestgreen')
lines(2012:2025,annualized.totalloss.upper[[6]][1:14]/10^6,lwd=1.5,lty=2,col='forestgreen')
lines(2012:2025,annualized.totalloss.upper[[9]][1:14]/10^6,lwd=1.5,lty=2,col='forestgreen')
dev.off()
# Comparison figure: upper-bound (EPRI/NRDC, blue) vs. base case (AEO2013,
# black) totals on one axis, in billions of dollars.
pdf('figures/upperbound_annual_total_comparison.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^9,ylim=c(0,max(annualized.totalloss.all.upper)/10^9),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(billions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss.all.upper[1:14,1]/10^9,lwd=2,col='blue4')
lines(2012:2025,annualized.totalloss.all.upper[1:14,2]/10^9,lwd=1.5,lty=2,col='blue4')
lines(2012:2025,annualized.totalloss.all.upper[1:14,3]/10^9,lwd=1.5,lty=2,col='blue4')
text(x=2023,y=.9,pos=2,labels='EPRI/NRDC EV Sales')
lines(2012:2025,annualized.totalloss.all[1:14,1]/10^9,lwd=2)
lines(2012:2025,annualized.totalloss.all[1:14,2]/10^9,lwd=1.5,lty=2)
lines(2012:2025,annualized.totalloss.all[1:14,3]/10^9,lwd=1.5,lty=2)
text(x=2024.5,y=.03,pos=2,labels='AEO2013 EV Sales')
dev.off()
#####################################################################################################
| /vehicletax_graphs.R | no_license | headisbagent/proj2_electricCarTaxes | R | false | false | 70,418 | r | rm(list=ls())
#####################################################################################################
#Load packages
library(maps)
library(mapproj)
library(mapdata)
library(mapplots)
library(maptools)
library(ggplot2)
library(reshape)
#####################################################################################################
#####################################################################################################
#National level data summary
# Columns include year, fueltax, total (all revenue), expenditure — per the uses below.
fed.highway <- read.csv("inputs/highwayfinances.csv",header=TRUE)
#Plot of federal highway funding/spending
pdf('figures/highway_finances.pdf')
par(mar=c(6,8,2,2))
# ylim spans every column of fed.highway (including `year`, which is harmless
# here since dollar values dominate the max).
plot(fed.highway$year,fed.highway$fueltax/10^6,type="n",xlab="Year",ylab="Annual revenue and expenditure\n for highways (billions of $)",ylim=c(0,max(fed.highway)/10^6),cex.lab=2,xaxt='n',yaxt='n')
grid()
axis(1,las=0,tck=.02,cex.axis=2)
axis(2,las=2,tck=.02,cex.axis=2)
lines(fed.highway$year,fed.highway$total/10^6,lwd=3,col="blue")
lines(fed.highway$year,fed.highway$expenditure/10^6,lwd=3,col="red")
# Manually positioned curve labels instead of a legend.
text(x=c(1998.5,2008),y=c(35,43.5),pos=c(2,2),labels=c("Revenue","Expenditures"))
dev.off()
file.copy('figures/highway_finances.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/highway_finances.pdf')
# Wider-aspect copy for presentation slides.
pdf('figures/forslides/highway_finances.pdf',height=8,width=12)
par(mar=c(6,6,2,2))
plot(fed.highway$year,fed.highway$fueltax/10^6,type="n",xlab="Year",ylab="Annual revenue/spending\n for highways (billions of $)",ylim=c(0,max(fed.highway)/10^6),cex.lab=1.5,xaxt='n',yaxt='n')
grid()
axis(1,las=0,tck=.02)
axis(2,las=2,tck=.02)
lines(fed.highway$year,fed.highway$total/10^6,lwd=3,col="blue")
lines(fed.highway$year,fed.highway$expenditure/10^6,lwd=3,col="red")
text(x=c(1998.5,2008),y=c(35,43.5),pos=c(2,2),labels=c("Revenue","Expenditures"))
dev.off()
#####################################################################################################
#####################################################################################################
#State level data summary
#data import
# Columns 1:8 are revenue sources; 10:12 and 14:18 are expenditure categories
# (columns 9 and 13 are skipped — presumably totals/separators; verify CSV layout).
state.finances <- read.csv("inputs/statefinances.csv",header=TRUE)
state.revenues <- state.finances[,1:8]
state.expenditures <- state.finances[,c(1,10:12,14:18)]
#re-ordering data so that the bar plot goes highest to smallest
state.revenues$state <- reorder(state.revenues$state,rowSums(state.revenues[-1]))
state.expenditures$state <- reorder(state.expenditures$state,rowSums(state.expenditures[-1]))
state.revenues.total <- rowSums(state.revenues[,c(2:length(state.revenues))])
state.expenditures.total <- rowSums(state.expenditures[,c(2:length(state.expenditures))])
# Net position per state (revenue minus expenditure); rows align because both
# tables come from the same state.finances frame.
state.diff <- data.frame('state'=state.revenues$state,'diff'=state.revenues.total-state.expenditures.total)
#function returning top 25
# Order the rows of `input` by the row-sum of all columns except the first
# (the state identifier), largest total first.  Despite the name, no
# truncation happens here — callers take head/tail slices themselves.
#
# input:   data frame whose first column is an identifier and whose remaining
#          columns are numeric dollar amounts
# returns: `input` with rows reordered by descending total, columns unchanged
top25 <- function(input) {
  # Compute totals locally rather than appending a temporary `sum` column and
  # dropping the last column afterwards (the old approach): if `input` ever
  # already contained a column named `sum`, the assignment would overwrite it
  # in place and the wrong (last) column would then be dropped.
  totals <- rowSums(input[-1])
  input[order(-totals), ]
}
# Split states into the top 26 and bottom 25 by total revenue, and subset the
# expenditure/diff tables to match (re-sorted to the revenue ordering).
state.revenues1 <- top25(state.revenues)[1:26,]
state.revenues2 <- top25(state.revenues)[27:51,]
state.expenditures1 <- state.expenditures[state.expenditures$state%in%state.revenues1$state,]
state.expenditures2 <- state.expenditures[state.expenditures$state%in%state.revenues2$state,]
state.diff1 <- state.diff[state.diff$state%in%state.revenues1$state,]
state.diff2 <- state.diff[state.diff$state%in%state.revenues2$state,]
state.diff1 <- state.diff1[order(match(state.diff1$state,state.revenues1$state)),]
state.diff2 <- state.diff2[order(match(state.diff2$state,state.revenues2$state)),]
# `type` labels are used by facet_wrap() in the diff plots below.
state.diff1$type = 'Difference'
state.diff2$type = 'Difference'
#Melt data tables together for ggplot input
m.state.revenues1 <- melt(state.revenues1,id.vars='state')
m.state.expenditures1 <- melt(state.expenditures1,id.vars='state')
m.state.revenues2 <- melt(state.revenues2,id.vars='state')
m.state.expenditures2 <- melt(state.expenditures2,id.vars='state')
# Zero-value placeholder rows: draw a blank bar segment between the revenue
# and expenditure color groups in the stacked charts below.
empty.hold1 <- data.frame('state'=unique(m.state.expenditures1$state),'variable'='none',value=0,type='Expenditure')
empty.hold2 <- data.frame('state'=unique(m.state.expenditures2$state),'variable'='none',value=0,type='Expenditure')
# `type` drives facet_wrap(~type) in the plots below.
m.state.revenues1$type <- 'Revenue'
m.state.expenditures1$type <- 'Expenditure'
# Removed: `m.state.diff1$type <- 'Difference'` — m.state.diff1 was never
# created (state.diff1 is plotted directly, unmelted), so the assignment
# raised "object 'm.state.diff1' not found" on a clean run.
m.state.all1 <- rbind(m.state.revenues1,empty.hold1,m.state.expenditures1)
m.state.revenues2$type <- 'Revenue'
m.state.expenditures2$type <- 'Expenditure'
# Removed: `m.state.diff2$type <- 'Difference'` — same issue as above.
m.state.all2 <- rbind(m.state.revenues2,empty.hold2,m.state.expenditures2)
# 7 revenue shades (blues), a white spacer for the 'none' placeholder level,
# then 8 expenditure shades (reds) — order must match the factor levels of
# `variable` in m.state.all1/2.
color.vector <- c('navy','blue3','blue1','dodgerblue3','dodgerblue1','lightblue3','lightblue1','white','brown4','lightsalmon4','firebrick1','indianred2','lightpink3','lightpink2','lightpink','mistyrose2')
#plot of top 25 states broken down by components, separated by rev and exp
pdf('figures/top25_states.pdf',height=9,width=9)
#dev.new(height=6,width=9)
ggplot(m.state.all1,aes(x=state,y=value/10^6,fill=variable,width=.8))+
geom_bar(stat='identity',color='black')+
facet_wrap(~type)+
coord_flip()+
xlab("State")+
ylab("Billions of Dollars ($)")+
theme(axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
panel.background=element_rect(fill='white',colour='white'),
panel.grid.major.x=element_line(colour="black"),
panel.grid.minor.x=element_line(colour="white"))+
scale_fill_manual(name="Source",values=color.vector,labels=c("Use Taxes","Tolls","Gen Funds","Misc", "Bonds","Fed Gov","Local Gov","","Federal Highway","Other","Roads/Streets","Maintenance","Admin/Police","Interest","Bonds","Grants"))
dev.off()
file.copy('figures/top25_states.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/top25_states.pdf')
# Net revenue-minus-expenditure bars for the same states.
pdf('figures/top25_states_diff.pdf',height=9,width=6)
ggplot(state.diff1,aes(x=state,y=diff/10^6,width=.8))+
geom_bar(stat='identity')+
facet_wrap(~type)+
coord_flip()+
xlab("State")+
ylab("Billions of Dollars ($)")+
theme(axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
panel.background=element_rect(fill='white',colour='white'),
panel.grid.major.x=element_line(colour="black"),
panel.grid.minor.x=element_line(colour="white"))
dev.off()
file.copy('figures/top25_states_diff.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/top25_states_diff.pdf')
#plot of bottom 25 states broken down by components, separated by rev and exp
pdf('figures/bottom25_states.pdf',height=9,width=9)
#dev.new(height=6,width=9)
ggplot(m.state.all2,aes(x=state,y=value/10^6,fill=variable,width=.8))+
geom_bar(stat='identity',color='black')+
facet_wrap(~type)+
coord_flip()+
xlab("State")+
ylab("Billions of Dollars ($)")+
theme(axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
panel.background=element_rect(fill='white',colour='white'),
panel.grid.major.x=element_line(colour="black"),
panel.grid.minor.x=element_line(colour="white"))+
scale_fill_manual(name="Source",values=color.vector,labels=c("Use Taxes","Tolls","Gen Funds","Misc", "Bonds","Fed Gov","Local Gov","","Federal Highway","Other","Roads/Streets","Maintenance","Admin/Police","Interest","Bonds","Grants"))
dev.off()
file.copy('figures/bottom25_states.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/bottom25_states.pdf')
pdf('figures/bottom25_states_diff.pdf',height=9,width=6)
ggplot(state.diff2,aes(x=state,y=diff/10^6,width=.8))+
geom_bar(stat='identity')+
facet_wrap(~type)+
coord_flip()+
xlab("State")+
ylab("Billions of Dollars ($)")+
theme(axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
panel.background=element_rect(fill='white',colour='white'),
panel.grid.major.x=element_line(colour="black"),
panel.grid.minor.x=element_line(colour="white"))
dev.off()
file.copy('figures/bottom25_states_diff.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/bottom25_states_diff.pdf')
#####################################################################################################
#####################################################################################################
#EV Sales and projections
#load electric vehicle sales
ev.sales <- read.csv('inputs/ev_sales.csv')
# Months before a model launched are blank in the CSV; treat as zero sales.
ev.sales[is.na(ev.sales)] <- 0
ev.sales$Date <- as.Date(ev.sales$Date,"%m-%d-%Y")
#plot electric vehicle sales by model
# One color per model column (columns 2..15 of ev.sales, in order).
colors.plot <- c("black","blue4","darkgreen","red2","orchid4","yellow1","orange","lightblue2","green","lightpink","violetred","gold","brown","dodgerblue2")
pdf('figures/ev_sales_bymodel.pdf')
par(mar=c(6,6,2,2))
plot(Chevrolet.Volt~Date,ev.sales,type="n",xlab="Date (monthly)",ylab="EV Sales (monthly)",ylim=c(min(ev.sales[,-1]),max(ev.sales[-1])),cex.lab=1.5,xaxt='n',yaxt='n')
axis(1,las=0,tck=.02)
axis(2,las=2,tck=.02)
#axis(1,ev.sales[,1],format(ev.sales$Date,"%m-%Y"),las=2,cex.axis=.5)
for(i in 2:length(ev.sales)) {
lines(ev.sales[,1],ev.sales[,i],col=colors.plot[i-1],lwd=2.5)
}
# Legend order must match the column order of ev.sales.
legend("topleft",c("Volt (PHEV)","Leaf (BEV)","SmartED (BEV)","i-MiEV (BEV)","ActiveE (BEV)","Prius (PHEV)","Focus (BEV)","Fit (BEV)","Model S (BEV)","RAV4 (BEV)","C-Max Energi (PHEV)","Accord (PHEV)","Fusion Energi (PHEV)","Spark (BEV)"),col=colors.plot,lwd=2,cex=.7)
dev.off()
file.copy('figures/ev_sales_bymodel.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/ev_sales_bymodel.pdf')
#####################################################################################################
#Popular sales
# Keep the four best sellers and lump every other model column into 'other'.
pop.evsales <- data.frame('date'=ev.sales$Date,'volt'=ev.sales$Chevrolet.Volt,'leaf'=ev.sales$Nissan.Leaf,'prius'=ev.sales$Toyota.Prius.PHV,'model.s'=ev.sales$Tesla.Model.S,'other'=rowSums(ev.sales[,c(4,5,6,8,9,11,12,13,14,15)]))
#plot popular electric vehicle sales by model
colors.plot2 <- c('black','blue4','darkgreen','red2','orange')
pdf('figures/popular_ev_sales_bymodel.pdf',height=5,width=8)
par(mar=c(6,6,2,2))
plot(volt~date,pop.evsales,type="n",xlab="Date (monthly)",ylab="EV Sales (monthly)",ylim=c(min(pop.evsales[,-1]),max(pop.evsales[-1])),cex.lab=1,xaxt='n',yaxt='n')
# Tick only at January and July of each year, labeled mm-YYYY.
axis(1,las=0,at=pop.evsales$date[format(pop.evsales$date,"%m")=='01'|format(pop.evsales$date,"%m")=='07'],labels=format(pop.evsales$date[format(pop.evsales$date,"%m")=='01'|format(pop.evsales$date,"%m")=='07'],"%m-%Y"),cex=.8,tck=.02)
axis(2,las=2,tck=.02)
#axis(1,ev.sales[,1],format(ev.sales$Date,"%m-%Y"),las=2,cex.axis=.5)
for(i in 2:length(pop.evsales)) {
lines(pop.evsales[,1],pop.evsales[,i],col=colors.plot2[i-1],lwd=2.5)
}
legend("topleft",c("Volt (PHEV)","Leaf (BEV)","Prius (PHEV)","Model S (BEV)","Other"),col=colors.plot2,lwd=2,cex=.7)
dev.off()
file.copy('figures/popular_ev_sales_bymodel.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/popular_ev_sales_bymodel.pdf')
# Square-aspect copy for presentation slides.
pdf('figures/forslides/popular_ev_sales_bymodel.pdf',height=6,width=6)
par(mar=c(6,6,2,2))
plot(volt~date,pop.evsales,type="n",xlab="Date (monthly)",ylab="EV Sales (monthly)",ylim=c(min(pop.evsales[,-1]),max(pop.evsales[-1])),cex.lab=1,xaxt='n',yaxt='n')
axis(1,las=0,at=pop.evsales$date[format(pop.evsales$date,"%m")=='01'|format(pop.evsales$date,"%m")=='07'],labels=format(pop.evsales$date[format(pop.evsales$date,"%m")=='01'|format(pop.evsales$date,"%m")=='07'],"%m-%Y"),cex=.8,tck=.02)
axis(2,las=2,tck=.02)
#axis(1,ev.sales[,1],format(ev.sales$Date,"%m-%Y"),las=2,cex.axis=.5)
for(i in 2:length(pop.evsales)) {
lines(pop.evsales[,1],pop.evsales[,i],col=colors.plot2[i-1],lwd=2.5)
}
legend("topleft",c("Volt (PHEV)","Leaf (BEV)","Prius (PHEV)","Model S (BEV)","Other"),col=colors.plot2,lwd=2,cex=.7)
dev.off()
#####################################################################################################
#Extra
#plot total electric vehicle sales
pdf('figures/ev_sales_total.pdf')
plot(Chevrolet.Volt~Date,ev.sales,type="n",xlab="Date (monthly)",ylab="Total EV Sales",ylim=c(0,max(rowSums(ev.sales[,2:length(ev.sales)]))),xaxt='n',yaxt='n')
axis(1,las=0)
axis(2,las=2)
lines(ev.sales[,1],rowSums(ev.sales[,2:length(ev.sales)]))
dev.off()
file.copy('figures/ev_sales_total.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/ev_sales_total.pdf')
#load hybrid vehicle sales
#monthly HEV sales by model; blank cells (model not yet on sale) become 0
hev.sales <- read.csv('inputs/hybrid_sales.csv')
hev.sales[is.na(hev.sales)] <- 0
hev.sales$Date <- as.Date(hev.sales$Date,"%m/%d/%Y")
#plot hybrid vehicle sales by model
colors.plot3 <- c("blue4","red2")
pdf('figures/hev_sales_bymodel.pdf')
#bug fix: the upper y-limit previously used max(ev.sales[-1]) (copy-pasted from the
#EV plot above), which can clip the HEV series; scale the axis to the HEV data itself
plot(Toyota.Prius~Date,hev.sales,type="n",xlab="Date (monthly)",ylab="HEV Sales",ylim=c(min(hev.sales[,-1]),max(hev.sales[-1])),xaxt='n',yaxt='n')
axis(1,las=0)
axis(2,las=2)
#axis(1,ev.sales[,1],format(ev.sales$Date,"%m-%Y"),las=2,cex.axis=.5)
#one line per model column (column 1 is Date)
for(i in 2:length(hev.sales)) {
lines(hev.sales[,1],hev.sales[,i],col=colors.plot3[i-1],lwd=2.5)
}
legend("topleft",c("Prius","Insight"),col=colors.plot3,lwd=2,cex=.7)
dev.off()
file.copy('figures/hev_sales_bymodel.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/hev_sales_bymodel.pdf')
#plot total electric vehicle sales vs hybrid electric vehicle sales
#both series aligned by months since market introduction
pdf('figures/combined_sales_total.pdf')
#NOTE(review): y-limits come from the EV totals only; if HEV monthly totals exceed
#the EV maximum the red line is clipped -- confirm this is intended
plot(1:length(ev.sales$Chevrolet.Volt),ev.sales$Chevrolet.Volt,type="n",xlab="Months Since Introduction",ylab="Total EV Sales",ylim=c(0,max(rowSums(ev.sales[,2:length(ev.sales)]))),xaxt='n',yaxt='n')
axis(1,las=0)
axis(2,las=2)
lines(1:length(rowSums(ev.sales[,2:length(ev.sales)])),rowSums(ev.sales[,2:length(ev.sales)]),col="blue4",lwd=2.5)
lines(1:length(rowSums(hev.sales[,2:length(hev.sales)])),rowSums(hev.sales[,2:length(hev.sales)]),col="red2",lwd=2.5)
legend("topleft",c("EVs","HEVs"),col=c("blue4","red2"),lwd=2)
dev.off()
file.copy('figures/combined_sales_total.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/combined_sales_total.pdf')
#####################################################################################################
#eia projection
#EIA Annual Energy Outlook sales projections; columns 2-5 = AEO 2012, 6-9 = AEO 2013
projections <- read.csv('inputs/eia_projection.csv')
eia.2012 <- projections[,1:5]
eia.2013 <- projections[,c(1,6:9)]
colnames(eia.2012)=c("Year","BEV100","PHEV10","PHEV40","FFV")
colnames(eia.2013)=c("Year","BEV100","PHEV10","PHEV40","FFV")
#annual historical EV sales
#rows 1:13 / 14:25 split the monthly series into 2011 and 2012 (13 months in the
#first block, presumably Dec 2010 launch -- TODO confirm against ev_sales.csv)
#BEV = all models except columns 1 and 6 of ev.sales[-1] (Volt and Prius PHV)
annual.bevsales <- data.frame('year'=c(2011,2012),'sales'=c(sum(rowSums(ev.sales[-1][,c(2:5,7:14)])[1:13]),sum(rowSums(ev.sales[-1][,c(2:5,7:14)])[14:25])))
#PHEV10 proxy = column 7 of ev.sales (Prius PHV); PHEV40 proxy = column 2 (Volt)
annual.phev10sales <- data.frame('year'=c(2011,2012),'sales'=c(sum(ev.sales[,7][1:13]),sum(ev.sales[,7][14:25])))
annual.phev40sales <- data.frame('year'=c(2011,2012),'sales'=c(sum(ev.sales[,2][1:13]),sum(ev.sales[,2][14:25])))
#EIA 2013 projections (blue) vs realized 2011-2012 sales (red); vertical line at 2012
#separates history from forecast
pdf('figures/eia_forecast2013.pdf')
par(mar=c(6,6,2,2))
plot(1,type="n",xlab="Year",ylab="",lwd=3.5,col="dodgerblue4",xlim=c(2011,2025),ylim=c(0,max(eia.2013[,2:4]/1000)),xaxt="n",yaxt="n",cex.lab=2)
grid()
axis(1,cex.axis=1,las=0,at=2011:2025,labels=2011:2025,tck=.02,cex.axis=2)
axis(2,cex.axis=1,las=2,tck=.02,cex.axis=2)
mtext('Sales (thousands)',side=2,line=4,cex=2)
abline(v=2012)
lines(eia.2013$Year,eia.2013$BEV100/1000,lty=1,lwd=3.5,col="dodgerblue4")
lines(eia.2013$Year,eia.2013$PHEV10/1000,lty=5,lwd=3.5,col="dodgerblue4")
lines(eia.2013$Year,eia.2013$PHEV40/1000,lty=3,lwd=3.5,col="dodgerblue4")
lines(2011:2012,annual.bevsales$sales/1000,lty=1,lwd=3.5,col='red')
lines(2011:2012,annual.phev10sales$sales/1000,lty=5,lwd=3.5,col='red')
lines(2011:2012,annual.phev40sales$sales/1000,lty=3,lwd=3.5,col='red')
#hand-placed annotations: curve labels and historical/projected arrows around x=2012
text(x=c(2019,2015,2021.8),y=c(15,30,120),pos=c(4,4,4),labels=c("BEV-100","PHEV-10","PHEV-40"))
text(x=2012,y=150,pos=2,labels="Historical Sales",srt=90)
text(x=2012,y=150,pos=4,labels="Projected Sales",srt=270)
arrows(2012,100,2011,100,length=.1,lwd=1.5)
arrows(2012,155,2013,155,length=.1,lwd=1.5)
dev.off()
file.copy('figures/eia_forecast2013.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/eia_forecast2013.pdf')
#same figure re-rendered square (6x6, smaller text) for slide decks
pdf('figures/forslides/eia_forecast2013.pdf',height=6,width=6)
par(mar=c(6,6,2,2))
plot(1,type="n",xlab="Year",ylab="Sales (thousands)",lwd=3.5,col="dodgerblue4",xlim=c(2011,2025),ylim=c(0,max(eia.2013[,2:4]/1000)),xaxt="n",yaxt="n",cex.lab=1.5)
grid()
axis(1,cex.axis=1,las=0,at=2011:2025,labels=2011:2025)
axis(2,cex.axis=1,las=2)
abline(v=2012)
lines(eia.2013$Year,eia.2013$BEV100/1000,lty=1,lwd=3.5,col="dodgerblue4")
lines(eia.2013$Year,eia.2013$PHEV10/1000,lty=5,lwd=3.5,col="dodgerblue4")
lines(eia.2013$Year,eia.2013$PHEV40/1000,lty=3,lwd=3.5,col="dodgerblue4")
lines(2011:2012,annual.bevsales$sales/1000,lty=1,lwd=3.5,col='red')
lines(2011:2012,annual.phev10sales$sales/1000,lty=5,lwd=3.5,col='red')
lines(2011:2012,annual.phev40sales$sales/1000,lty=3,lwd=3.5,col='red')
text(x=c(2019,2015,2021.8),y=c(15,30,120),pos=c(4,4,4),labels=c("BEV-100","PHEV-10","PHEV-40"))
text(x=2012,y=150,pos=2,labels="Historical Sales",srt=90)
text(x=2012,y=150,pos=4,labels="Projected Sales",srt=270)
arrows(2012,100,2011,100,length=.1,lwd=1.5)
arrows(2012,155,2013,155,length=.1,lwd=1.5)
dev.off()
#####################################################################################################
#Policy forecasts
#published EV market-share forecasts, one column per source; NA-padded to a common Year column
forecasts2 <- read.csv('inputs/forecasts_v2.csv')
#semi-transparent grey for the envelope polygon
grey.shade = rgb(0,0,0,alpha=0.2,maxColorValue=1)
pdf('figures/forecasts.pdf',height=6,width=9)
par(mar=c(4,6,2,2))
plot(x=forecasts2$Year,y=1:nrow(forecasts2),type='n',xlab=NA,ylab=NA,xaxt='n',yaxt='n',xlim=c(2008,2055),ylim=c(0,1))
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
mtext('Year',side=1,line=2.5,cex=1.5)
mtext('Market Share',side=2,line=3.5,cex=1.5)
#hand-drawn grey envelope roughly spanning the forecast range
polygon(x=c(2012,2020,2050,2050,2020,2010),y=c(0,.55,1,.17,0,0),col=grey.shade,border=NA)
#one line per forecast source, labeled at its final point; na.omit drops the
#years a given source does not cover
for(column in 2:ncol(forecasts2)){
lines(x=na.omit(forecasts2[,c(1,column)])$Year,y=na.omit(forecasts2[,column]),lwd=2,lty=column,col='grey30')
if(column==3) {
#column 3's label is nudged down, presumably to avoid overlapping a neighbor -- TODO confirm
text(x=tail(na.omit(forecasts2[,c(1,column)])$Year,n=1),y=tail(na.omit(forecasts2[,column]),n=1)-.03,labels=labels(forecasts2)[[2]][column],cex=.8,pos=1)
}
else {
text(x=tail(na.omit(forecasts2[,c(1,column)])$Year,n=1),y=tail(na.omit(forecasts2[,column]),n=1),labels=labels(forecasts2)[[2]][column],cex=.8,pos=4)
}
}
text(x=2040,y=.4,pos=4,labels='FORECASTS',font=2)
dev.off()
#####################################################################################################
#Chloropleth Maps
#import data
#per-state fee/tax revenue tables, one file per vehicle model; each has a State
#column, fee components (used in columns 3-7 by draw.pie below), and a Total column
camry <- read.csv("inputs/camry.csv")
civic <- read.csv("inputs/civic.csv")
f150 <- read.csv("inputs/f150.csv")
leaf <- read.csv("inputs/leaf.csv")
prius <- read.csv("inputs/prius.csv")
prius.phev <- read.csv("inputs/priusphev.csv")
volt <- read.csv("inputs/volt.csv")
#state names
#maps::map polygon names look like "state:subregion"; strip everything after ':'
#so they can be matched against the State columns
mapnames <- map("state",plot=FALSE)$names
mapnames.state <- ifelse(regexpr(":",mapnames) < 0,mapnames,
                         substr(mapnames, 1, regexpr(":",mapnames)-1))
#convert states to lowercase
#(map() polygon names are lowercase, so the State columns must be too)
camry$State <- tolower(camry$State)
civic$State <- tolower(civic$State)
f150$State <- tolower(f150$State)
leaf$State <- tolower(leaf$State)
prius$State <- tolower(prius$State)
prius.phev$State <- tolower(prius.phev$State)
volt$State <- tolower(volt$State)
#chloropleth quantities
#shared bin edges ($) for the five-shade total-fee maps below
color.split <- c(1500,3000,4500,6000)
#camry choropleth map
#bin total fees into five shades of blue; left-open findInterval bins reproduce
#the strictly-greater comparisons of the original if/else ladder
camry$color <- c("white","lightskyblue","dodgerblue","blue2","blue4")[
  findInterval(camry$Total, color.split, left.open = TRUE) + 1]
pdf('figures/camry_fees.pdf')
map("state",fill=TRUE,col=camry$color[match(mapnames.state,camry$State)])
#pie charts at each state center break the total into its five fee components (cols 3-7)
raincol <- c("purple","red","darkgreen","black","yellow")
draw.pie(state.center$x,state.center$y,as.matrix(camry[,c(3,4,5,6,7)]),radius=.65,col=raincol,scale=FALSE)
#legend("bottomleft",leg=c("Federal Fuel Tax","State Fuel Tax","Registration Fees","Title Fees","Inspection Fees"),fill=c("purple","red","darkgreen","black","yellow"),cex=0.7,title="Fee Breakdown")
#legend("bottomright",leg=c('0-1499','1500-2999','3000-4499','4500-5999','>6000'),fill=c("white","lightskyblue","dodgerblue","blue2","blue4"),title="Total Fees ($)",cex=.7)
dev.off()
file.copy('figures/camry_fees.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/camry_fees.pdf')
#civic choropleth map
#bin total fees into five shades of blue (left-open bins == strictly-greater if/else chain)
civic$color <- c("white","lightskyblue","dodgerblue","blue2","blue4")[
  findInterval(civic$Total, color.split, left.open = TRUE) + 1]
pdf('figures/civic_fees.pdf')
map("state",fill=TRUE,col=civic$color[match(mapnames.state,civic$State)])
#pie charts at each state center break the total into its five fee components (cols 3-7)
raincol <- c("purple","red","darkgreen","black","yellow")
draw.pie(state.center$x,state.center$y,as.matrix(civic[,c(3,4,5,6,7)]),radius=.65,col=raincol,scale=FALSE)
#legend("bottomleft",leg=c("Federal Fuel Tax","State Fuel Tax","Registration Fees","Title Fees","Inspection Fees"),fill=c("purple","red","darkgreen","black","yellow"),cex=0.7,title="Fee Breakdown")
#legend("bottomright",leg=c('0-1499','1500-2999','3000-4499','4500-5999','>6000'),fill=c("white","lightskyblue","dodgerblue","blue2","blue4"),title="Total Fees ($)",cex=.7)
dev.off()
file.copy('figures/civic_fees.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/civic_fees.pdf')
#f150 choropleth map
#bin total fees into five shades of blue (left-open bins == strictly-greater if/else chain)
f150$color <- c("white","lightskyblue","dodgerblue","blue2","blue4")[
  findInterval(f150$Total, color.split, left.open = TRUE) + 1]
pdf('figures/f150_fees.pdf')
map("state",fill=TRUE,col=f150$color[match(mapnames.state,f150$State)])
#pie charts at each state center break the total into its five fee components (cols 3-7)
raincol <- c("purple","red","darkgreen","black","yellow")
draw.pie(state.center$x,state.center$y,as.matrix(f150[,c(3,4,5,6,7)]),radius=.65,col=raincol,scale=FALSE)
#legend("bottomleft",leg=c("Federal Fuel Tax","State Fuel Tax","Registration Fees","Title Fees","Inspection Fees"),fill=c("purple","red","darkgreen","black","yellow"),cex=0.7,title="Fee Breakdown")
#legend("bottomright",leg=c('0-1499','1500-2999','3000-4499','4500-5999','>6000'),fill=c("white","lightskyblue","dodgerblue","blue2","blue4"),title="Total Fees ($)",cex=.7)
dev.off()
file.copy('figures/f150_fees.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/f150_fees.pdf')
#leaf choropleth map
#bin total fees into five shades of blue (left-open bins == strictly-greater if/else chain)
leaf$color <- c("white","lightskyblue","dodgerblue","blue2","blue4")[
  findInterval(leaf$Total, color.split, left.open = TRUE) + 1]
pdf('figures/leaf_fees.pdf')
map("state",fill=TRUE,col=leaf$color[match(mapnames.state,leaf$State)])
#pie charts at each state center break the total into its five fee components (cols 3-7)
raincol <- c("purple","red","darkgreen","black","yellow")
draw.pie(state.center$x,state.center$y,as.matrix(leaf[,c(3,4,5,6,7)]),radius=.65,col=raincol,scale=FALSE)
#legend("bottomleft",leg=c("Federal Fuel Tax","State Fuel Tax","Registration Fees","Title Fees","Inspection Fees"),fill=c("purple","red","darkgreen","black","yellow"),cex=0.7,title="Fee Breakdown")
#legend("bottomright",leg=c('0-1499','1500-2999','3000-4499','4500-5999','>6000'),fill=c("white","lightskyblue","dodgerblue","blue2","blue4"),title="Total Fees ($)",cex=.7)
dev.off()
file.copy('figures/leaf_fees.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/leaf_fees.pdf')
#prius choropleth map
#bin total fees into five shades of blue (left-open bins == strictly-greater if/else chain)
prius$color <- c("white","lightskyblue","dodgerblue","blue2","blue4")[
  findInterval(prius$Total, color.split, left.open = TRUE) + 1]
pdf('figures/prius_fees.pdf')
map("state",fill=TRUE,col=prius$color[match(mapnames.state,prius$State)])
#pie charts at each state center break the total into its five fee components (cols 3-7)
raincol <- c("purple","red","darkgreen","black","yellow")
draw.pie(state.center$x,state.center$y,as.matrix(prius[,c(3,4,5,6,7)]),radius=.65,col=raincol,scale=FALSE)
#legend("bottomleft",leg=c("Federal Fuel Tax","State Fuel Tax","Registration Fees","Title Fees","Inspection Fees"),fill=c("purple","red","darkgreen","black","yellow"),cex=0.7,title="Fee Breakdown")
#legend("bottomright",leg=c('0-1499','1500-2999','3000-4499','4500-5999','>6000'),fill=c("white","lightskyblue","dodgerblue","blue2","blue4"),title="Total Fees ($)",cex=.7)
dev.off()
file.copy('figures/prius_fees.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/prius_fees.pdf')
#prius phev choropleth map
#bin total fees into five shades of blue (left-open bins == strictly-greater if/else chain)
prius.phev$color <- c("white","lightskyblue","dodgerblue","blue2","blue4")[
  findInterval(prius.phev$Total, color.split, left.open = TRUE) + 1]
pdf('figures/priusphev_fees.pdf')
map("state",fill=TRUE,col=prius.phev$color[match(mapnames.state,prius.phev$State)])
#pie charts at each state center break the total into its five fee components (cols 3-7)
raincol <- c("purple","red","darkgreen","black","yellow")
draw.pie(state.center$x,state.center$y,as.matrix(prius.phev[,c(3,4,5,6,7)]),radius=.65,col=raincol,scale=FALSE)
#legend("bottomleft",leg=c("Federal Fuel Tax","State Fuel Tax","Registration Fees","Title Fees","Inspection Fees"),fill=c("purple","red","darkgreen","black","yellow"),cex=0.7,title="Fee Breakdown")
#legend("bottomright",leg=c('0-1499','1500-2999','3000-4499','4500-5999','>6000'),fill=c("white","lightskyblue","dodgerblue","blue2","blue4"),title="Total Fees ($)",cex=.7)
dev.off()
file.copy('figures/priusphev_fees.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/priusphev_fees.pdf')
#volt choropleth map
#bin total fees into five shades of blue (left-open bins == strictly-greater if/else chain)
volt$color <- c("white","lightskyblue","dodgerblue","blue2","blue4")[
  findInterval(volt$Total, color.split, left.open = TRUE) + 1]
pdf('figures/volt_fees.pdf')
map("state",fill=TRUE,col=volt$color[match(mapnames.state,volt$State)])
#pie charts at each state center break the total into its five fee components (cols 3-7)
raincol <- c("purple","red","darkgreen","black","yellow")
draw.pie(state.center$x,state.center$y,as.matrix(volt[,c(3,4,5,6,7)]),radius=.65,col=raincol,scale=FALSE)
#legend("bottomleft",leg=c("Federal Fuel Tax","State Fuel Tax","Registration Fees","Title Fees","Inspection Fees"),fill=c("purple","red","darkgreen","black","yellow"),cex=0.7,title="Fee Breakdown")
#legend("bottomright",leg=c('0-1499','1500-2999','3000-4499','4500-5999','>6000'), fill=c("white","lightskyblue","dodgerblue","blue2","blue4"),title="Total Fees ($)",cex=.7)
dev.off()
file.copy('figures/volt_fees.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/volt_fees.pdf')
#shared legend for the fee maps above, rendered on its own page (the in-map
#legends are commented out in each section)
pdf('figures/map_legend.pdf')
#empty, axis-free canvas; both legends are placed manually on it
plot(x=0:50,y=0:50,xlab='',ylab='',type='n',xaxt='n',yaxt='n',bty='n')
legend(x=0,y=50,leg=c("Federal Fuel Tax","State Fuel Tax","Registration Fees","Title Fees","Inspection Fees"),
       fill=c("purple","red","darkgreen","black","yellow"),cex=0.7,title="Fee Breakdown",box.lwd = 0,box.col = "white",bg = "white")
legend(x=15,y=50,leg=c('0-1499','1500-2999','3000-4499','4500-5999','>6000'),
       fill=c("white","lightskyblue","dodgerblue","blue2","blue4"),title="Total Fees ($)",cex=.7,box.lwd = 0,box.col = "white",bg = "white",ncol=2)
dev.off()
file.copy('figures/map_legend.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/map_legend.pdf')
#####################################################################################################
#Differences map: F-150 total fees minus Leaf total fees, shaded in five reds
diff.f150.leaf <- data.frame('State'=f150$State,'Total'=f150$Total-leaf$Total)
color.split2 <- c(1000,2000,3000,4000)
#left-open findInterval bins reproduce the strictly-greater if/else ladder
diff.f150.leaf$color <- c("white","pink1","indianred2","red1","red4")[
  findInterval(diff.f150.leaf$Total, color.split2, left.open = TRUE) + 1]
pdf('figures/diff_fees.pdf')
map("state",fill=TRUE,col=diff.f150.leaf$color[match(mapnames.state,diff.f150.leaf$State)])
legend("bottomright",leg=c('0-999','1000-1999','2000-2999','3000-3999','>4000'),
       fill=rev(c("red4","red1","indianred2","pink1","white")),title="Revenue Difference ($)",bty='n',cex=.7)
dev.off()
file.copy('figures/diff_fees.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/diff_fees.pdf')
#####################################################################################################
#####################################################################################################
#Prius distribution for appendix
#load state sale distribution data
state.percents <- read.csv('inputs/prius_statepercentages.csv')
#total and percent diff by state
state.percents.total <- read.csv('inputs/total_statepercentages.csv')
state.percents.diff <- read.csv('inputs/percentdiff_bystate.csv',header=FALSE)
#chloropleth map of prius state sales distribution
#mean share of Prius sales per state; quintile breaks drive the five-shade coloring
priussales <- data.frame("state"=camry$State,"percent"=colMeans(state.percents))
priussales.quantiles <- quantile(colMeans(state.percents),probs=c(.2,.4,.6,.8))
#left-open findInterval bins reproduce the strictly-greater if/else ladder used elsewhere
priussales$color <- c("white","lightskyblue","dodgerblue","blue2","blue4")[
  findInterval(priussales$percent, priussales.quantiles, left.open = TRUE) + 1]
pdf('figures/priussales.pdf')
map("state",fill=TRUE,col=priussales$color[match(mapnames.state,priussales$state)])
#legend breaks are percentages (x100); bug fix: the first bin's upper bound previously
#printed the raw fraction (missing the x100 used by every other entry)
legend("bottomright",leg=c(as.expression(paste(0," - ",signif(priussales.quantiles[1]*100,3))),
                           as.expression(paste(signif(priussales.quantiles[1]*100,3)," - ",signif(priussales.quantiles[2]*100,3))),
                           as.expression(paste(signif(priussales.quantiles[2]*100,3)," - ",signif(priussales.quantiles[3]*100,3))),
                           as.expression(paste(signif(priussales.quantiles[3]*100,3)," - ",signif(priussales.quantiles[4]*100,3))),
                           as.expression(paste(signif(priussales.quantiles[4]*100,3)," - ",signif(max(priussales$percent)*100,3)))),
       fill=c("white","lightskyblue","dodgerblue","blue2","blue4"),title="Sales Distribution (%)",cex=.7)
dev.off()
file.copy('figures/priussales.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/priussales.pdf')
#chloropleth map of total state sales distribution
#mean share of total vehicle sales per state; quintile breaks drive the shading
totalsales <- data.frame("state"=camry$State,"percent"=colMeans(state.percents.total))
totalsales.quantiles <- quantile(colMeans(state.percents.total),probs=c(.2,.4,.6,.8))
#left-open findInterval bins reproduce the strictly-greater if/else ladder used elsewhere
totalsales$color <- c("white","lightskyblue","dodgerblue","blue2","blue4")[
  findInterval(totalsales$percent, totalsales.quantiles, left.open = TRUE) + 1]
pdf('figures/totalsales.pdf')
map("state",fill=TRUE,col=totalsales$color[match(mapnames.state,totalsales$state)])
#legend breaks are percentages (x100); bug fix: the first bin's upper bound previously
#printed the raw fraction (missing the x100 used by every other entry)
legend("bottomright",leg=c(as.expression(paste(0," - ",signif(totalsales.quantiles[1]*100,3))),
                           as.expression(paste(signif(totalsales.quantiles[1]*100,3)," - ",signif(totalsales.quantiles[2]*100,3))),
                           as.expression(paste(signif(totalsales.quantiles[2]*100,3)," - ",signif(totalsales.quantiles[3]*100,3))),
                           as.expression(paste(signif(totalsales.quantiles[3]*100,3)," - ",signif(totalsales.quantiles[4]*100,3))),
                           as.expression(paste(signif(totalsales.quantiles[4]*100,3)," - ",signif(max(totalsales$percent)*100,3)))),
       fill=c("white","lightskyblue","dodgerblue","blue2","blue4"),title="Sales Distribution (%)",cex=.7)
dev.off()
file.copy('figures/totalsales.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/totalsales.pdf')
#chloropleth map of difference in state sales distribution
#sign of the raw csv values is flipped before binning
diffsales <- data.frame("state"=camry$State,"percent"=state.percents.diff*-1)
colnames(diffsales) <- c("state","percent")
diffsales.quantiles <- quantile(diffsales$percent,probs=c(.2,.4,.6,.8))
#red = lowest quintile up through lightskyblue = highest; left-open findInterval
#bins reproduce the strictly-greater if/else ladder used elsewhere
diffsales$color <- c("red","lightcoral","lightpink","white","lightskyblue")[
  findInterval(diffsales$percent, diffsales.quantiles, left.open = TRUE) + 1]
pdf('figures/diffsales.pdf')
map("state",fill=TRUE,col=diffsales$color[match(mapnames.state,diffsales$state)])
#legend breaks are percentages (x100); bug fix: the first bin's upper bound previously
#printed the raw fraction (missing the x100 used by every other entry)
legend("bottomright",leg=c(as.expression(paste(signif(min(diffsales$percent)*100,3)," - ",signif(diffsales.quantiles[1]*100,3))),
                           as.expression(paste(signif(diffsales.quantiles[1]*100,3)," - ",signif(diffsales.quantiles[2]*100,3))),
                           as.expression(paste(signif(diffsales.quantiles[2]*100,3)," - ",signif(diffsales.quantiles[3]*100,3))),
                           as.expression(paste(signif(diffsales.quantiles[3]*100,3)," - ",signif(diffsales.quantiles[4]*100,3))),
                           as.expression(paste(signif(diffsales.quantiles[4]*100,3)," - ",signif(max(diffsales$percent)*100,3)))),
       fill=c("red","lightcoral","lightpink","white","lightskyblue"),title="Sales Distribution (%)",cex=.7)
dev.off()
file.copy('figures/diffsales.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/diffsales.pdf')
#####################################################################################################
#####################################################################################################
#EIA AEO by region
#census-division names matching the per-region csv files under inputs/aeo_byregion/
region.list <- c('east.north.central','east.south.central','middle.atlantic','mountain','newengland','pacific','south.atlantic','west.north.central','west.south.central')
#Parse one regional AEO 2013 projection csv into a data.frame of annual sales.
#Columns 5,7,...,31 of the raw csv hold the 2012-2025 projections; rows 14/15/17/18
#hold FFV / BEV-100 / PHEV-10 / PHEV-40 (layout inferred from the indexing below --
#TODO confirm against a raw csv). Raw values appear to be in thousands (x1000 on output).
#as.numeric(levels(as.ordered(...))) converts a single factor cell to numeric
#(csv read under pre-R4 stringsAsFactors defaults).
extract.2013proj <- function(inputtable) {
holdtable <- read.csv(inputtable,header=FALSE)
bev <- numeric(0)
phev10 <- numeric(0)
phev40 <- numeric(0)
ffv <- numeric(0)
#every other column from 5 to 31 is one projection year (2012..2025)
for(i in seq(5,31,by=2)) {
bev <- c(bev,as.numeric(levels(as.ordered(holdtable[[i]][15]))))
phev10 <- c(phev10,as.numeric(levels(as.ordered(holdtable[[i]][17]))))
phev40 <- c(phev40,as.numeric(levels(as.ordered(holdtable[[i]][18]))))
ffv <- c(ffv,as.numeric(levels(as.ordered(holdtable[[i]][14]))))
}
out <- data.frame('year'=2012:2025,'BEV100'=round(bev*1000,0),'PHEV10'=round(phev10*1000,0),'PHEV40'=round(phev40*1000,0),'FFV'=round(ffv*1000,0))
return(out)
}
#read each region's projection into a global variable named after the region
for(i in 1:length(region.list)) {
assign(paste(region.list[i]),extract.2013proj(paste('inputs/aeo_byregion/',region.list[i],'.csv',sep='')))
}
#####################################################################################################
#####################################################################################################
#Projected sales by state
#Prius sales for proxy, matrix for drawing values
#each row of prius.sales is one observation of state-level Prius sales, used as a proxy
#for how projected EV sales distribute across states; the last row is dropped
#(presumably a totals row -- TODO confirm against priussales_bystate.csv)
prius.sales <- read.csv('inputs/priussales_bystate.csv',header=TRUE)
prius.sales <- prius.sales[-nrow(prius.sales),]
#Allocate each region's EIA 2013 projection (east.north.central, ..., created by the
#loop above) to its member states in proportion to each state's share of regional
#Prius sales, once per row of prius.sales.
#Returns a list of four year x state x draw arrays: BEV100, PHEV10, PHEV40, FFV.
#Side effect: writes state.order (the column order of the state dimension) to the
#global environment via <<-.
future.sales.bystate <- function() {
#states in each region
pacific.states <- c('ALASKA','WASHINGTON','OREGON','CALIFORNIA','HAWAII')
mountain.states <- c('MONTANA','IDAHO','NEVADA','UTAH','WYOMING','COLORADO','ARIZONA','NEW.MEXICO')
west.north.central.states <- c('NORTH.DAKOTA','MINNESOTA','SOUTH.DAKOTA','NEBRASKA','KANSAS','IOWA','MISSOURI')
west.south.central.states <- c('TEXAS','OKLAHOMA','ARKANSAS','LOUISIANA')
east.north.central.states <- c('WISCONSIN','MICHIGAN','ILLINOIS','INDIANA','OHIO')
east.south.central.states <- c('KENTUCKY','TENNESSEE','MISSISSIPPI','ALABAMA')
south.atlantic.states <- c('WEST.VIRGINIA','MARYLAND','DELAWARE','VIRGINIA','NORTH.CAROLINA','SOUTH.CAROLINA','GEORGIA','FLORIDA')
middle.atlantic.states <- c('PENNSYLVANIA','NEW.YORK','NEW.JERSEY')
newengland.states <- c('MAINE','VERMONT','NEW.HAMPSHIRE','MASSACHUSETTS','RHODE.ISLAND','CONNECTICUT')
#creating empty arrays for final list output
BEV100.out <- array(dim=c(nrow(eia.2013),50,nrow(prius.sales)))
PHEV10.out <- array(dim=c(nrow(eia.2013),50,nrow(prius.sales)))
PHEV40.out <- array(dim=c(nrow(eia.2013),50,nrow(prius.sales)))
FFV.out <- array(dim=c(nrow(eia.2013),50,nrow(prius.sales)))
for(j in 1:nrow(prius.sales)) {
hold.sales <- prius.sales[j,]
#creating empty matrices to be filled, one for each row of prius sales
BEV100.bystate <- matrix(numeric(0),nrow=nrow(eia.2013))
PHEV10.bystate <- matrix(numeric(0),nrow=nrow(eia.2013))
PHEV40.bystate <- matrix(numeric(0),nrow=nrow(eia.2013))
FFV.bystate <- matrix(numeric(0),nrow=nrow(eia.2013))
for(i in region.list) {
#takes prius sales and keeps only states within the region
hold.statesales <- hold.sales[colnames(hold.sales)%in%get(paste(i,'.states',sep=''))]
#transformation of the sales to the proportional distribution in each region
hold.statetotal <- sum(hold.statesales)
hold.statedist <- hold.statesales/hold.statetotal
for(k in colnames(eia.2013[-1])) {
#gives table of sales by state for each technology
#'[['(get(i),k) pulls column k from the region's projection table; the outer
#product (years x 1) %*% (1 x states) spreads each year's total over the states
assign(paste('hold.',k,'.bystate',sep=''),(as.matrix('[['(get(i),k),ncol=1)%*%as.matrix(hold.statedist,nrow=1)))
#appends the table to the matrix holding all other regions that have been run
assign(paste(k,'.bystate',sep=''),cbind(get(paste(k,'.bystate',sep='')),get(paste('hold.',k,'.bystate',sep=''))))
}
}
#assigns completed table (of each technology sales across 50 states) to the empty array
BEV100.out[,,j] <- BEV100.bystate
PHEV10.out[,,j] <- PHEV10.bystate
PHEV40.out[,,j] <- PHEV40.bystate
FFV.out[,,j] <- FFV.bystate
}
#export the state column order so callers can label the (unnamed) array dimension
state.order <<- colnames(BEV100.bystate)
out <- list(BEV100.out,PHEV10.out,PHEV40.out,FFV.out)
return(out)
}
projectedsales.bystate <- future.sales.bystate()
#normalize names ('NEW.MEXICO' -> 'new mexico') to match the maps package convention
state.order <- tolower(state.order)
state.order <- gsub('\\.',' ',state.order)
#summarize across the prius.sales draws: mean and 2.5/97.5 percentile year x state
#tables per technology, stored as mean.projected<TECH>.bystate etc.
for(i in 1:ncol(eia.2013[-1])) {
assign(paste('mean.projected',colnames(eia.2013[i+1]),'.bystate',sep=''),as.data.frame(apply(projectedsales.bystate[[i]],c(1,2),mean)))
assign(paste('q25.projected',colnames(eia.2013[i+1]),'.bystate',sep=''),apply(projectedsales.bystate[[i]],c(1,2),quantile,probs=.025))
assign(paste('q975.projected',colnames(eia.2013[i+1]),'.bystate',sep=''),apply(projectedsales.bystate[[i]],c(1,2),quantile,probs=.975))
}
#collect every summary table (mean / q2.5 / q97.5 for each technology) into one list
all.cases <- list(
  mean.projectedBEV100.bystate, mean.projectedPHEV10.bystate,
  mean.projectedPHEV40.bystate, mean.projectedFFV.bystate,
  q25.projectedBEV100.bystate, q25.projectedPHEV10.bystate,
  q25.projectedPHEV40.bystate, q25.projectedFFV.bystate,
  q975.projectedBEV100.bystate, q975.projectedPHEV10.bystate,
  q975.projectedPHEV40.bystate, q975.projectedFFV.bystate
)
#label a table's columns with the saved state ordering, then sort columns alphabetically
name.it <- function(tbl) {
  colnames(tbl) <- state.order
  tbl[, order(state.order)]
}
all.cases <- lapply(all.cases, name.it)
#####################################################################################################
#####################################################################################################
#Calculating projected revenue losses
#For each sales table in salesinput (ordered BEV100, PHEV10, PHEV40, FFV, repeated for
#mean / q2.5 / q97.5 -- see all.cases), multiply state-level sales by the per-vehicle
#revenue shortfall relative to a Camry. camry$Total is read from the global environment.
#FFV tables (i %% 4 == 0) are intentionally skipped, so the output list has 9 entries:
#BEV, PHEV10, PHEV40 for each of mean, q2.5, q97.5 (rows = states, cols = years after t()).
#bevrev/phev10rev/phev40rev: per-state lifetime revenue vectors for each technology.
future.revenue.decrease <- function(salesinput,bevrev,phev10rev,phev40rev) {
out <- list()
counter = 1
for(i in 1:length(salesinput)) {
#mod to distinguish technologies
if(i%%4 == 1) {
#transpose so rows = states, columns = years; elementwise scale by revenue gap
hold <- t(salesinput[[i]])*(camry$Total-bevrev)
out[[counter]] <- hold
counter = counter+1
}
else if(i%%4 == 2) {
hold <- t(salesinput[[i]])*(camry$Total-phev10rev)
out[[counter]] <- hold
counter = counter+1
}
else if (i%%4 ==3) {
hold <- t(salesinput[[i]])*(camry$Total-phev40rev)
out[[counter]] <- hold
counter = counter+1
}
}
return(out)
}
#list of revenue by year, state, and technology and distribution (row=state, column=year, list num=technology and dist: by 3's BEV, PHEV10, PHEV40, sets of 3 mean, q25, q975)
all.revenues.decrease <- future.revenue.decrease(all.cases,leaf$Total,prius.phev$Total,volt$Total)
#####################################################################################################
#####################################################################################################
#State level map (only needs state and technology)
#collapse the mean-case tables (list entries 1-3) across years to a total loss per state
total.state.revenueloss <- data.frame(
  'bev.loss' = rowSums(all.revenues.decrease[[1]]),
  'phev10.loss' = rowSums(all.revenues.decrease[[2]]),
  'phev40.loss' = rowSums(all.revenues.decrease[[3]])
)
total.state.revenueloss$totalloss <- rowSums(total.state.revenueloss)
total.state.revenueloss$state <- rownames(total.state.revenueloss)
#quintile shading in reds; left-open findInterval bins reproduce the
#strictly-greater if/else ladder used for the other maps
loss.quantiles <- quantile(total.state.revenueloss$totalloss, probs = c(.2, .4, .6, .8))
total.state.revenueloss$color <- c("white","pink1","indianred2","red1","red4")[
  findInterval(total.state.revenueloss$totalloss, loss.quantiles, left.open = TRUE) + 1]
pdf('figures/revenue_loss_bystate.pdf')
map("state",fill=TRUE,col=total.state.revenueloss$color[match(mapnames.state,total.state.revenueloss$state)])
#legend reports losses in millions of dollars (absolute values)
legend("bottomright",leg=c(as.expression(paste("< ",abs(signif(loss.quantiles[1]/10^6,2)))),
                           as.expression(paste(abs(signif(loss.quantiles[1]/10^6,2))," - ",abs(signif(loss.quantiles[2]/10^6,2)))),
                           as.expression(paste(abs(signif(loss.quantiles[2]/10^6,2))," - ",abs(signif(loss.quantiles[3]/10^6,2)))),
                           as.expression(paste(abs(signif(loss.quantiles[3]/10^6,2))," - ",abs(signif(loss.quantiles[4]/10^6,2)))),
                           as.expression(paste(abs(signif(loss.quantiles[4]/10^6,2))," - ",abs(signif(max(total.state.revenueloss$totalloss)/10^6,2))))),
       fill=rev(c("red4","red1","indianred2","pink1","white")),title="Revenue Loss \n(millions of $)",bty='n',cex=.7)
dev.off()
file.copy('figures/revenue_loss_bystate.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/revenue_loss_bystate.pdf')
#####################################################################################################
#Normalized losses (to 2012 population)
#reading in data on 2012 population
population.bystate.2012 <- read.csv('inputs/2012_population.csv',header=TRUE)
# Per-capita loss.  NOTE(review): this elementwise division assumes the rows
# of population.bystate.2012 are in the same state order as
# total.state.revenueloss -- confirm against the input CSV before reuse.
total.state.revenueloss$normalizedloss <- total.state.revenueloss$totalloss/population.bystate.2012$Population
#assigning color2s by quantiles of loss
normalizedloss.quantiles <- quantile(total.state.revenueloss$normalizedloss ,probs=c(.2,.4,.6,.8))
for(i in 1:nrow(total.state.revenueloss)) {
if(total.state.revenueloss$normalizedloss [i]>normalizedloss.quantiles[4]) {
total.state.revenueloss$color2[i] <- "red4"
}
else if(total.state.revenueloss$normalizedloss [i]>normalizedloss.quantiles[3]) {
total.state.revenueloss$color2[i] <- "red1"
}
else if(total.state.revenueloss$normalizedloss [i]>normalizedloss.quantiles[2]) {
total.state.revenueloss$color2[i] <- "indianred2"
}
else if(total.state.revenueloss$normalizedloss [i]>normalizedloss.quantiles[1]) {
total.state.revenueloss$color2[i] <- "pink1"
}
else {
total.state.revenueloss$color2[i] <- "white"
}
}
# Choropleth of per-person revenue loss by state (dollars per resident).
pdf('figures/revenue_loss_bystate_normalized.pdf')
map("state",fill=TRUE,col=total.state.revenueloss$color2[match(mapnames.state,total.state.revenueloss$state)])
legend("bottomright",leg=c(as.expression(paste("< ",abs(signif(normalizedloss.quantiles[1],2)))),
as.expression(paste(abs(signif(normalizedloss.quantiles[1],2))," - ",abs(signif(normalizedloss.quantiles[2],2)))),
as.expression(paste(abs(signif(normalizedloss.quantiles[2],2))," - ",abs(signif(normalizedloss.quantiles[3],2)))),
as.expression(paste(abs(signif(normalizedloss.quantiles[3],2))," - ",abs(signif(normalizedloss.quantiles[4],2)))),
as.expression(paste(abs(signif(normalizedloss.quantiles[4],2))," - ",abs(signif(max(total.state.revenueloss$normalizedloss),2))))),
fill=rev(c("red4","red1","indianred2","pink1","white")),title="Per Person \nRevenue Loss ($)",bty='n',cex=.7)
dev.off()
file.copy('figures/revenue_loss_bystate_normalized.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/revenue_loss_bystate_normalized.pdf')
#####################################################################################################
#####################################################################################################
#Calculating revenue loss over time
# Cumulative loss per purchase year: column sums across states for each
# technology, for the mean case and the q25/q975 uncertainty cases.
cumulative.loss.byyear <- data.frame('mean.loss.bev'=colSums(all.revenues.decrease[[1]]),'mean.loss.phev10'=colSums(all.revenues.decrease[[2]]),'mean.loss.phev40'=colSums(all.revenues.decrease[[3]]),'q25.loss.bev'=colSums(all.revenues.decrease[[4]]),'q25.loss.phev10'=colSums(all.revenues.decrease[[5]]),'q25.loss.phev40'=colSums(all.revenues.decrease[[6]]),'q975.loss.bev'=colSums(all.revenues.decrease[[7]]),'q975.loss.phev10'=colSums(all.revenues.decrease[[8]]),'q975.loss.phev40'=colSums(all.revenues.decrease[[9]]))
#distributing losses evenly
# Spread each cohort-year value evenly over `lifetime` consecutive years.
#
# Each element input[i] contributes input[i]/lifetime to output years
# i .. i + lifetime - 1, i.e. a discrete convolution of `input` with
# rep(1/lifetime, lifetime).
#
# Args:
#   input:    numeric vector of cumulative losses, one element per cohort year.
#   lifetime: number of years each cohort's loss is spread over (default 12).
# Returns:
#   numeric vector of length length(input) + lifetime - 1 of annualized losses.
#
# Rewritten to preallocate the result instead of growing vectors with c()
# inside the loop (the original reallocated `hold` and `temp` every
# iteration), and to use seq_len()/seq_along() instead of 1:lifetime.
stagger.add <- function(input,lifetime=12) {
share <- input/lifetime
out <- rep(0,length(input)+lifetime-1)
for(offset in seq_len(lifetime)-1L) {
idx <- seq_along(share)+offset
out[idx] <- out[idx]+share
}
out
}
# Annualize each loss stream (spread over vehicle lifetime) and total the
# three technologies for the mean/q25/q975 cases.
annualized.totalloss <- lapply(cumulative.loss.byyear,stagger.add)
annualized.totalloss.all <- data.frame('mean.total'=annualized.totalloss[[1]]+annualized.totalloss[[2]]+annualized.totalloss[[3]],'q25.total'=annualized.totalloss[[4]]+annualized.totalloss[[5]]+annualized.totalloss[[6]],'q975.total'=annualized.totalloss[[7]]+annualized.totalloss[[8]]+annualized.totalloss[[9]])
#plotting total revenue decreases
pdf('figures/annual_revenueloss.pdf')
par(mar=c(6,4,2,2))
plot(2012:2025,annualized.totalloss[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss (millions of $)')
grid()
axis(1,las=0,tck=.02)
axis(2,las=2,tck=.02)
# Black lines: totals (solid = mean, dashed = uncertainty bounds).
lines(2012:2025,annualized.totalloss.all[1:14,1]/10^6,lwd=2)
lines(2012:2025,annualized.totalloss.all[1:14,2]/10^6,lwd=1.5,lty=2)
lines(2012:2025,annualized.totalloss.all[1:14,3]/10^6,lwd=1.5,lty=2)
# List elements 1-3 are means (solid); 4-9 are the uncertainty cases
# (dashed).  i %% 3 selects the color: 1 = BEV (red), 2 = PHEV-10 (blue),
# 0 = PHEV-40 (forest green).
for(i in 1:length(annualized.totalloss)) {
if(1<=i&i<=3) {
line.type <- 1
if(i%%3==1) {
color.assignment <- 'red'
lines(2012:2025,annualized.totalloss[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=2)
}
else if(i%%3==2) {
color.assignment <- 'blue'
lines(2012:2025,annualized.totalloss[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=2)
}
else {
color.assignment <- 'forestgreen'
lines(2012:2025,annualized.totalloss[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=2)
}
}
else {
line.type <- 2
if(i%%3==1) {
color.assignment <- 'red'
lines(2012:2025,annualized.totalloss[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=1.5)
}
else if(i%%3==2) {
color.assignment <- 'blue'
lines(2012:2025,annualized.totalloss[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=1.5)
}
else {
color.assignment <- 'forestgreen'
lines(2012:2025,annualized.totalloss[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=1.5)
}
}
}
legend("topleft",c('Total','BEV','PHEV-10','PHEV-40'),lty=c(1,1,1,1),col=c('black','red','blue','forestgreen'),bg='white')
dev.off()
file.copy('figures/annual_revenueloss.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/annual_revenueloss.pdf')
#for 2x2 grid
# Same curves split into four single-panel figures (total, BEV, PHEV-10,
# PHEV-40) for a 2x2 layout in the dissertation.
pdf('figures/annual_revenueloss_total.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss.all[1:14,1]/10^6,lwd=2)
lines(2012:2025,annualized.totalloss.all[1:14,2]/10^6,lwd=1.5,lty=2)
lines(2012:2025,annualized.totalloss.all[1:14,3]/10^6,lwd=1.5,lty=2)
dev.off()
file.copy('figures/annual_revenueloss_total.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/annual_revenueloss_total.pdf')
pdf('figures/annual_revenueloss_bev.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss[[1]][1:14]/10^6,lwd=2,col='red')
lines(2012:2025,annualized.totalloss[[4]][1:14]/10^6,lwd=1.5,lty=2,col='red')
lines(2012:2025,annualized.totalloss[[7]][1:14]/10^6,lwd=1.5,lty=2,col='red')
dev.off()
file.copy('figures/annual_revenueloss_bev.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/annual_revenueloss_bev.pdf')
pdf('figures/annual_revenueloss_phev10.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss[[2]][1:14]/10^6,lwd=2,col='blue')
lines(2012:2025,annualized.totalloss[[5]][1:14]/10^6,lwd=1.5,lty=2,col='blue')
lines(2012:2025,annualized.totalloss[[8]][1:14]/10^6,lwd=1.5,lty=2,col='blue')
dev.off()
file.copy('figures/annual_revenueloss_phev10.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/annual_revenueloss_phev10.pdf')
pdf('figures/annual_revenueloss_phev40.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,cex.axis=1.5)
axis(2,las=2,cex.axis=1.5)
lines(2012:2025,annualized.totalloss[[3]][1:14]/10^6,lwd=2,col='forestgreen')
lines(2012:2025,annualized.totalloss[[6]][1:14]/10^6,lwd=1.5,lty=2,col='forestgreen')
lines(2012:2025,annualized.totalloss[[9]][1:14]/10^6,lwd=1.5,lty=2,col='forestgreen')
dev.off()
file.copy('figures/annual_revenueloss_phev40.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/annual_revenueloss_phev40.pdf')
#####################################################################################################
#####################################################################################################
#Sensitivity Analysis
#function to generate revenue from alternative policy: registration fee
# Alternative policy 1: flat annual registration fee set as a percentage of
# MSRP.  Overwrites the Registration.Fees column with the lifetime fee
# revenue and recomputes Total from revenue columns 3 through 7.
#
# Args:
#   input:      revenue table (one row per state) with a Registration.Fees
#               column among columns 3:7 and a Total column.
#   msrp:       vehicle MSRP in dollars.
#   percentage: annual fee as a fraction of MSRP.
#   lifetime:   vehicle lifetime in years.
# Returns: a copy of `input` with updated Registration.Fees and Total.
alt1 <- function(input, msrp, percentage, lifetime) {
revised <- input
lifetime.fee <- msrp * percentage * lifetime
revised$Registration.Fees <- lifetime.fee
revised$Total <- rowSums(revised[, 3:7])
revised
}
# Apply the registration-fee policy (1% of MSRP per year over a 12-year
# lifetime) to each vehicle's lifetime revenue table.
alt1.leaf <- alt1(leaf,21300,.01,12)
alt1.volt <- alt1(volt,31645,.01,12)
alt1.priusphev <- alt1(prius.phev,32000,.01,12)
#function to generate revenue from alternative policy: use fee
# Alternative policy 2: per-mile use fee on electric-mode driving.  Adds the
# lifetime fee revenue to State.Fuel.Tax and recomputes Total from revenue
# columns 3 through 7.
#
# Args:
#   input:                revenue table with a State.Fuel.Tax column among
#                         columns 3:7 and a Total column.
#   electricmode.percent: fraction of miles driven on electricity.
#   vmt:                  annual vehicle-miles traveled.
#   lifetime:             vehicle lifetime in years.
#   taxrate:              fee in dollars per electric mile.
# Returns: a copy of `input` with updated State.Fuel.Tax and Total.
alt2 <- function(input, electricmode.percent, vmt, lifetime, taxrate) {
revised <- input
use.fee <- lifetime * vmt * electricmode.percent * taxrate
revised$State.Fuel.Tax <- revised$State.Fuel.Tax + use.fee
revised$Total <- rowSums(revised[, 3:7])
revised
}
# Apply the use-fee policy: 12,000 VMT/yr for 12 years at $0.01 per electric
# mile, scaled by each vehicle's electric-mode share (Leaf 100%, Volt 64%,
# Prius PHEV 28.8%).
alt2.leaf <- alt2(leaf,1,12000,12,.01)
alt2.volt <- alt2(volt,.64,12000,12,.01)
alt2.priusphev <- alt2(prius.phev,.288,12000,12,.01)
#sensitivity on registration fee
# For one registration-fee percentage `input`, recompute the per-vehicle
# lifetime revenues under the fee and return the cumulative revenue change
# (mean / q25 / q975) summed over all states, years, and technologies.
# Depends on globals: all.cases, leaf, prius.phev, volt,
# future.revenue.decrease().
sensitivity1 <- function(input) {
hold <- future.revenue.decrease(all.cases,alt1(leaf,21300,input,12)$Total,alt1(prius.phev,32000,input,12)$Total,alt1(volt,31645,input,12)$Total)
out <- as.matrix(data.frame('mean.totalrev'=sum(hold[[1]],hold[[2]],hold[[3]]),'q25.totalrev'=sum(hold[[4]],hold[[5]],hold[[6]]),'q975.totalrev'=sum(hold[[7]],hold[[8]],hold[[9]])))
return(out)
}
# Sweep fee levels from 0% to 1% of MSRP in 0.1% steps.
alt1.sensitivity <- sapply(seq(0,.01,by=.001),sensitivity1)
#sensitivity on use fee
# Same as sensitivity1 but sweeping the per-mile use-fee tax rate instead of
# the registration-fee percentage.
sensitivity2 <- function(input) {
hold <- future.revenue.decrease(all.cases,alt2(leaf,1,12000,12,input)$Total,alt2(prius.phev,.288,12000,12,input)$Total,alt2(volt,.64,12000,12,input)$Total)
out <- as.matrix(data.frame('mean.totalrev'=sum(hold[[1]],hold[[2]],hold[[3]]),'q25.totalrev'=sum(hold[[4]],hold[[5]],hold[[6]]),'q975.totalrev'=sum(hold[[7]],hold[[8]],hold[[9]])))
return(out)
}
# Sweep use fees from 0 to 10 cents per mile in 1-cent steps.
alt2.sensitivity <- sapply(seq(0,.1,by=.01),sensitivity2)
#plot of sensitivity on registration fee
pdf('figures/registrationfee_sensitivity.pdf')
par(mar=c(8,8,1,1)+0.1)
plot(seq(0,.01,by=.001)*100,alt1.sensitivity[1,]/10^9,type="n",lwd=2.5,xlab="",ylab="",ylim=c(min(alt1.sensitivity/10^9),max(alt1.sensitivity/10^9)),xaxt='n',yaxt='n')
# NOTE(review): cex.axis is supplied twice in the next two calls; one of the
# duplicates should be dropped.
axis(1,cex.axis=1,las=0,tck=.02,cex.axis=2)
axis(2,cex.axis=1,las=2,tck=.02,cex.axis=2)
mtext('Annual Registration Fee \n Percentage of MSRP',side=1,line=5,cex=2)
mtext('Cumulative Revenue Decrease from \nEVs by 2025 (billions of dollars)',side=2,line=4,cex=2)
grid()
# Red line marks break-even (no net revenue change).
abline(h=0,col='red')
lines(seq(0,.01,by=.001)*100,alt1.sensitivity[1,]/10^9,lwd=2.5)
# Dashed lines connect the endpoint uncertainty bounds.
lines(c(0,0.01)*100,c(alt1.sensitivity[3,1],alt1.sensitivity[2,11])/10^9,lwd=2.5,lty=2)
lines(c(0,0.01)*100,c(alt1.sensitivity[2,1],alt1.sensitivity[3,11])/10^9,lwd=2.5,lty=2)
dev.off()
file.copy('figures/registrationfee_sensitivity.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/registrationfee_sensitivity.pdf')
#plot of sensitivity on use fee
pdf('figures/usefee_sensitivity.pdf')
par(mar=c(6,8,1,1)+0.1)
plot(seq(0,.1,by=.01)*100,alt2.sensitivity[1,]/10^9,type="n",lwd=2.5,xlab="",ylab="",ylim=c(min(alt2.sensitivity/10^9),max(alt2.sensitivity/10^9)),xaxt='n',yaxt='n')
axis(1,cex.axis=1,las=0,tck=.02,cex.axis=2)
axis(2,cex.axis=1,las=2,tck=.02,cex.axis=2)
mtext('Use Fee Tax (cents per mile)',side=1,line=3,cex=2)
mtext('Cumulative Revenue Decrease from \nEVs by 2025 (billions of dollars)',side=2,line=4,cex=2)
grid()
abline(h=0,col='red')
lines(seq(0,.1,by=.01)*100,alt2.sensitivity[1,]/10^9,lwd=2.5)
lines(c(0,0.1)*100,c(alt2.sensitivity[3,1]/10^9,alt2.sensitivity[2,11]/10^9),lwd=2.5,lty=2)
lines(c(0,0.1)*100,c(alt2.sensitivity[2,1]/10^9,alt2.sensitivity[3,11]/10^9),lwd=2.5,lty=2)
dev.off()
file.copy('figures/usefee_sensitivity.pdf','~/Dropbox/CarnegieMellon/Dissertation/chapters/chapter2/figures/usefee_sensitivity.pdf')
###
#upper-bound analysis
###
# Upper-bound EV sales: scale the AEO2013-based sales tables by the ratio of
# the EPRI/NRDC 2007 forecast to the AEO2013 forecast (forecast rows 5:18).
epri.nrdc.2007 <- eia.2013
epri.nrdc.2007$BEV100 <- epri.nrdc.2007$BEV*forecasts2$EPRI.NRDC2007[5:18]/forecasts2$AEO2013[5:18]
epri.nrdc.2007$PHEV10 <- epri.nrdc.2007$PHEV10*forecasts2$EPRI.NRDC2007[5:18]/forecasts2$AEO2013[5:18]
epri.nrdc.2007$PHEV40 <- epri.nrdc.2007$PHEV40*forecasts2$EPRI.NRDC2007[5:18]/forecasts2$AEO2013[5:18]
epri.nrdc.2007$FFV <- epri.nrdc.2007$FFV*forecasts2$EPRI.NRDC2007[5:18]/forecasts2$AEO2013[5:18]
# Upper-bound state-level sales allocation: distributes national technology
# sales to the 50 states, using each bootstrap draw of Prius sales as the
# within-Census-region distribution.  Returns a list of four arrays
# (year x state x draw) for BEV100, PHEV10, PHEV40, and FFV.
# Depends on globals: epri.nrdc.2007, prius.sales, region.list, and the
# per-region sales tables named in region.list (accessed via get()).
# Side effect: writes the global `state.order` via `<<-`.
future.sales.bystate.upper <- function() {
#states in each region
pacific.states <- c('ALASKA','WASHINGTON','OREGON','CALIFORNIA','HAWAII')
mountain.states <- c('MONTANA','IDAHO','NEVADA','UTAH','WYOMING','COLORADO','ARIZONA','NEW.MEXICO')
west.north.central.states <- c('NORTH.DAKOTA','MINNESOTA','SOUTH.DAKOTA','NEBRASKA','KANSAS','IOWA','MISSOURI')
west.south.central.states <- c('TEXAS','OKLAHOMA','ARKANSAS','LOUISIANA')
east.north.central.states <- c('WISCONSIN','MICHIGAN','ILLINOIS','INDIANA','OHIO')
east.south.central.states <- c('KENTUCKY','TENNESSEE','MISSISSIPPI','ALABAMA')
south.atlantic.states <- c('WEST.VIRGINIA','MARYLAND','DELAWARE','VIRGINIA','NORTH.CAROLINA','SOUTH.CAROLINA','GEORGIA','FLORIDA')
middle.atlantic.states <- c('PENNSYLVANIA','NEW.YORK','NEW.JERSEY')
newengland.states <- c('MAINE','VERMONT','NEW.HAMPSHIRE','MASSACHUSETTS','RHODE.ISLAND','CONNECTICUT')
#creating empty arrays for final list output
BEV100.out <- array(dim=c(nrow(epri.nrdc.2007),50,nrow(prius.sales)))
PHEV10.out <- array(dim=c(nrow(epri.nrdc.2007),50,nrow(prius.sales)))
PHEV40.out <- array(dim=c(nrow(epri.nrdc.2007),50,nrow(prius.sales)))
FFV.out <- array(dim=c(nrow(epri.nrdc.2007),50,nrow(prius.sales)))
for(j in 1:nrow(prius.sales)) {
hold.sales <- prius.sales[j,]
#creating empty matrices to be filled, one for each row of prius sales
BEV100.bystate <- matrix(numeric(0),nrow=nrow(epri.nrdc.2007))
PHEV10.bystate <- matrix(numeric(0),nrow=nrow(epri.nrdc.2007))
PHEV40.bystate <- matrix(numeric(0),nrow=nrow(epri.nrdc.2007))
FFV.bystate <- matrix(numeric(0),nrow=nrow(epri.nrdc.2007))
for(i in region.list) {
#takes prius sales and keeps only states within the region
hold.statesales <- hold.sales[colnames(hold.sales)%in%get(paste(i,'.states',sep=''))]
#transformation of the sales to the proportional distribution in each region
hold.statetotal <- sum(hold.statesales)
hold.statedist <- hold.statesales/hold.statetotal
for(k in colnames(epri.nrdc.2007[-1])) {
#gives table of sales by state for each technology
# (outer product: region sales-by-year column times the state shares row)
assign(paste('hold.',k,'.bystate',sep=''),(as.matrix('[['(get(i),k),ncol=1)%*%as.matrix(hold.statedist,nrow=1)))
#appends the table to the matrix holding all other regions that have been run
assign(paste(k,'.bystate',sep=''),cbind(get(paste(k,'.bystate',sep='')),get(paste('hold.',k,'.bystate',sep=''))))
}
}
#assigns completed table (of each technology sales across 50 states) to the empty array
BEV100.out[,,j] <- BEV100.bystate
PHEV10.out[,,j] <- PHEV10.bystate
PHEV40.out[,,j] <- PHEV40.bystate
FFV.out[,,j] <- FFV.bystate
}
# Global side effect: record the column (state) order of the last draw so
# downstream code can label and sort state columns.
state.order <<- colnames(BEV100.bystate)
out <- list(BEV100.out,PHEV10.out,PHEV40.out,FFV.out)
return(out)
}
projectedsales.bystate.upper <- future.sales.bystate.upper()
# Rescale every year-column of every draw by the EPRI/NRDC-to-AEO2013 fleet
# forecast ratio (rows 5:18 cover the projection years).
for(i in 1:length(projectedsales.bystate.upper)){
for(j in 1:dim(projectedsales.bystate.upper[[i]])[3])
for(k in 1:ncol(projectedsales.bystate.upper[[i]][,,j]))
projectedsales.bystate.upper[[i]][,k,j] <- projectedsales.bystate.upper[[i]][,k,j]*forecasts2$EPRI.NRDC2007[5:18]/forecasts2$AEO2013[5:18]
}
# Convert state names to the lowercase, space-separated form used by map().
state.order <- tolower(state.order)
state.order <- gsub('\\.',' ',state.order)
# Collapse the draw dimension into mean / 2.5% / 97.5% summary tables for
# each technology (assigned as mean.projected<tech>.bystate.upper etc.).
for(i in 1:ncol(epri.nrdc.2007[-1])) {
assign(paste('mean.projected',colnames(epri.nrdc.2007[i+1]),'.bystate.upper',sep=''),as.data.frame(apply(projectedsales.bystate.upper[[i]],c(1,2),mean)))
assign(paste('q25.projected',colnames(epri.nrdc.2007[i+1]),'.bystate.upper',sep=''),apply(projectedsales.bystate.upper[[i]],c(1,2),quantile,probs=.025))
assign(paste('q975.projected',colnames(epri.nrdc.2007[i+1]),'.bystate.upper',sep=''),apply(projectedsales.bystate.upper[[i]],c(1,2),quantile,probs=.975))
}
all.cases.upper <- list(mean.projectedBEV100.bystate.upper, mean.projectedPHEV10.bystate.upper, mean.projectedPHEV40.bystate.upper, mean.projectedFFV.bystate.upper, q25.projectedBEV100.bystate.upper, q25.projectedPHEV10.bystate.upper, q25.projectedPHEV40.bystate.upper, q25.projectedFFV.bystate.upper, q975.projectedBEV100.bystate.upper, q975.projectedPHEV10.bystate.upper, q975.projectedPHEV40.bystate.upper, q975.projectedFFV.bystate.upper)
# Label the columns of `table` with the global `state.order` vector, then
# return the table with its columns sorted alphabetically by state name.
name.it <- function(table) {
colnames(table) <- state.order
table[, order(colnames(table))]
}
all.cases.upper <- lapply(all.cases.upper,name.it)
#####################################################################################################
#####################################################################################################
#Calculating projected revenue losses
#list of revenue by year, state, and technology and distribution (row=state, column=year, list num=technology and dist: by 3's BEV, PHEV10, PHEV40, sets of 3 mean, q25, q975)
all.revenues.decrease.upper <- future.revenue.decrease(all.cases.upper,leaf$Total,prius.phev$Total,volt$Total)
#####################################################################################################
#####################################################################################################
#State level map (only needs state and technology)
#data reduction to state and technology (use only list 1-3, sum columns)
total.state.revenueloss.upper <- data.frame('bev.loss'=rowSums(all.revenues.decrease.upper[[1]]),'phev10.loss'=rowSums(all.revenues.decrease.upper[[2]]),'phev40.loss'=rowSums(all.revenues.decrease.upper[[3]]))
total.state.revenueloss.upper$totalloss <- rowSums(total.state.revenueloss.upper)
total.state.revenueloss.upper$state <- rownames(total.state.revenueloss.upper)
#assigning colors by quantiles of loss
# Same quintile/color scheme as the base-case map above.
loss.quantiles.upper <- quantile(total.state.revenueloss.upper$totalloss ,probs=c(.2,.4,.6,.8))
for(i in 1:nrow(total.state.revenueloss.upper)) {
if(total.state.revenueloss.upper$totalloss [i]>loss.quantiles.upper[4]) {
total.state.revenueloss.upper$color[i] <- "red4"
}
else if(total.state.revenueloss.upper$totalloss [i]>loss.quantiles.upper[3]) {
total.state.revenueloss.upper$color[i] <- "red1"
}
else if(total.state.revenueloss.upper$totalloss [i]>loss.quantiles.upper[2]) {
total.state.revenueloss.upper$color[i] <- "indianred2"
}
else if(total.state.revenueloss.upper$totalloss [i]>loss.quantiles.upper[1]) {
total.state.revenueloss.upper$color[i] <- "pink1"
}
else {
total.state.revenueloss.upper$color[i] <- "white"
}
}
pdf('figures/upperbound_revenue_loss_bystate.pdf')
map("state",fill=TRUE,col=total.state.revenueloss.upper$color[match(mapnames.state,total.state.revenueloss.upper$state)])
legend("bottomright",leg=c(as.expression(paste("< ",abs(signif(loss.quantiles.upper[1]/10^6,2)))),
as.expression(paste(abs(signif(loss.quantiles.upper[1]/10^6,2))," - ",abs(signif(loss.quantiles.upper[2]/10^6,2)))),
as.expression(paste(abs(signif(loss.quantiles.upper[2]/10^6,2))," - ",abs(signif(loss.quantiles.upper[3]/10^6,2)))),
as.expression(paste(abs(signif(loss.quantiles.upper[3]/10^6,2))," - ",abs(signif(loss.quantiles.upper[4]/10^6,2)))),
as.expression(paste(abs(signif(loss.quantiles.upper[4]/10^6,2))," - ",abs(signif(max(total.state.revenueloss.upper$totalloss)/10^6,2))))),
fill=rev(c("red4","red1","indianred2","pink1","white")),title="Revenue Loss \n(millions of $)",bty='n',cex=.7)
dev.off()
#####################################################################################################
#####################################################################################################
#Calculating revenue loss over time
cumulative.loss.byyear.upper <- data.frame('mean.loss.bev'=colSums(all.revenues.decrease.upper[[1]]),'mean.loss.phev10'=colSums(all.revenues.decrease.upper[[2]]),'mean.loss.phev40'=colSums(all.revenues.decrease.upper[[3]]),'q25.loss.bev'=colSums(all.revenues.decrease.upper[[4]]),'q25.loss.phev10'=colSums(all.revenues.decrease.upper[[5]]),'q25.loss.phev40'=colSums(all.revenues.decrease.upper[[6]]),'q975.loss.bev'=colSums(all.revenues.decrease.upper[[7]]),'q975.loss.phev10'=colSums(all.revenues.decrease.upper[[8]]),'q975.loss.phev40'=colSums(all.revenues.decrease.upper[[9]]))
# Annualize (spread over vehicle lifetime) and total across technologies,
# mirroring the base-case calculation above.
annualized.totalloss.upper <- lapply(cumulative.loss.byyear.upper,stagger.add)
annualized.totalloss.all.upper <- data.frame('mean.total'=annualized.totalloss.upper[[1]]+annualized.totalloss.upper[[2]]+annualized.totalloss.upper[[3]],'q25.total'=annualized.totalloss.upper[[4]]+annualized.totalloss.upper[[5]]+annualized.totalloss.upper[[6]],'q975.total'=annualized.totalloss.upper[[7]]+annualized.totalloss.upper[[8]]+annualized.totalloss.upper[[9]])
#plotting total revenue decreases
pdf('figures/upperbound_annual_revenueloss.pdf')
par(mar=c(6,4,2,2))
plot(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all.upper)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss (millions of $)')
grid()
axis(1,las=0,tck=.02)
axis(2,las=2,tck=.02)
lines(2012:2025,annualized.totalloss.all.upper[1:14,1]/10^6,lwd=2)
lines(2012:2025,annualized.totalloss.all.upper[1:14,2]/10^6,lwd=1.5,lty=2)
lines(2012:2025,annualized.totalloss.all.upper[1:14,3]/10^6,lwd=1.5,lty=2)
# Same color/linetype scheme as the base-case figure: elements 1-3 are solid
# means, 4-9 dashed bounds; red = BEV, blue = PHEV-10, green = PHEV-40.
for(i in 1:length(annualized.totalloss.upper)) {
if(1<=i&i<=3) {
line.type <- 1
if(i%%3==1) {
color.assignment <- 'red'
lines(2012:2025,annualized.totalloss.upper[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=2)
}
else if(i%%3==2) {
color.assignment <- 'blue'
lines(2012:2025,annualized.totalloss.upper[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=2)
}
else {
color.assignment <- 'forestgreen'
lines(2012:2025,annualized.totalloss.upper[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=2)
}
}
else {
line.type <- 2
if(i%%3==1) {
color.assignment <- 'red'
lines(2012:2025,annualized.totalloss.upper[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=1.5)
}
else if(i%%3==2) {
color.assignment <- 'blue'
lines(2012:2025,annualized.totalloss.upper[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=1.5)
}
else {
color.assignment <- 'forestgreen'
lines(2012:2025,annualized.totalloss.upper[[i]][1:14]/10^6,col=color.assignment,lty=line.type,lwd=1.5)
}
}
}
legend("topleft",c('Total','BEV','PHEV-10','PHEV-40'),lty=c(1,1,1,1),col=c('black','red','blue','forestgreen'),bg='white')
dev.off()
#for 2x2 grid
# Upper-bound versions of the four single-panel figures, plus a final figure
# comparing upper-bound (EPRI/NRDC) and base-case (AEO2013) totals.
pdf('figures/upperbound_annual_revenueloss_total.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all.upper)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss.all.upper[1:14,1]/10^6,lwd=2)
lines(2012:2025,annualized.totalloss.all.upper[1:14,2]/10^6,lwd=1.5,lty=2)
lines(2012:2025,annualized.totalloss.all.upper[1:14,3]/10^6,lwd=1.5,lty=2)
dev.off()
pdf('figures/upperbound_annual_revenueloss_bev.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all.upper)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^6,lwd=2,col='red')
lines(2012:2025,annualized.totalloss.upper[[4]][1:14]/10^6,lwd=1.5,lty=2,col='red')
lines(2012:2025,annualized.totalloss.upper[[7]][1:14]/10^6,lwd=1.5,lty=2,col='red')
dev.off()
pdf('figures/upperbound_annual_revenueloss_phev10.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all.upper)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss.upper[[2]][1:14]/10^6,lwd=2,col='blue')
lines(2012:2025,annualized.totalloss.upper[[5]][1:14]/10^6,lwd=1.5,lty=2,col='blue')
lines(2012:2025,annualized.totalloss.upper[[8]][1:14]/10^6,lwd=1.5,lty=2,col='blue')
dev.off()
pdf('figures/upperbound_annual_revenueloss_phev40.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^6,ylim=c(0,max(annualized.totalloss.all.upper)/10^6),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(millions of $)',cex.lab=2)
grid()
axis(1,las=0,cex.axis=1.5)
axis(2,las=2,cex.axis=1.5)
lines(2012:2025,annualized.totalloss.upper[[3]][1:14]/10^6,lwd=2,col='forestgreen')
lines(2012:2025,annualized.totalloss.upper[[6]][1:14]/10^6,lwd=1.5,lty=2,col='forestgreen')
lines(2012:2025,annualized.totalloss.upper[[9]][1:14]/10^6,lwd=1.5,lty=2,col='forestgreen')
dev.off()
# Comparison figure: both scenarios on one axis, in billions of dollars.
pdf('figures/upperbound_annual_total_comparison.pdf')
par(mar=c(6,8,2,2))
plot(2012:2025,annualized.totalloss.upper[[1]][1:14]/10^9,ylim=c(0,max(annualized.totalloss.all.upper)/10^9),type='n',xaxt='n',yaxt='n',xlab='Year',ylab='Annual Revenue Loss\n(billions of $)',cex.lab=2)
grid()
axis(1,las=0,tck=.02,cex.axis=1.5)
axis(2,las=2,tck=.02,cex.axis=1.5)
lines(2012:2025,annualized.totalloss.all.upper[1:14,1]/10^9,lwd=2,col='blue4')
lines(2012:2025,annualized.totalloss.all.upper[1:14,2]/10^9,lwd=1.5,lty=2,col='blue4')
lines(2012:2025,annualized.totalloss.all.upper[1:14,3]/10^9,lwd=1.5,lty=2,col='blue4')
text(x=2023,y=.9,pos=2,labels='EPRI/NRDC EV Sales')
lines(2012:2025,annualized.totalloss.all[1:14,1]/10^9,lwd=2)
lines(2012:2025,annualized.totalloss.all[1:14,2]/10^9,lwd=1.5,lty=2)
lines(2012:2025,annualized.totalloss.all[1:14,3]/10^9,lwd=1.5,lty=2)
text(x=2024.5,y=.03,pos=2,labels='AEO2013 EV Sales')
dev.off()
#####################################################################################################
#####################################################################################################
|
#' Construct a model frame
#'
#' `model_frame()` is a stricter version of [stats::model.frame()]. There are
#' a number of differences, with the main being that rows are _never_ dropped
#' and the return value is a list with the frame and terms separated into
#' two distinct objects.
#'
#' @param formula A formula or terms object representing the terms of the
#' model frame.
#'
#' @param data A data frame or matrix containing the terms of `formula`.
#'
#' @details
#'
#' The following explains the rationale for some of the differences in arguments
#' compared to [stats::model.frame()]:
#'
#' - `subset`: Not allowed because the number of rows before and after
#' `model_frame()` has been run should always be the same.
#'
#' - `na.action`: Not allowed and is forced to `"na.pass"` because the
#' number of rows before and after `model_frame()` has been run should always
#' be the same.
#'
#' - `drop.unused.levels`: Not allowed because it seems inconsistent for
#' `data` and the result of `model_frame()` to ever have the same factor column
#' but with different levels, unless specified through `original_levels`. If
#' this is required, it should be done through a recipe step explicitly.
#'
#' - `xlev`: Not allowed because this check should have been done ahead of
#' time. Use [scream()] to check the integrity of `data` against a training
#' set if that is required.
#'
#' - `...`: Not exposed because offsets are handled separately, and
#' it is not necessary to pass weights here any more because rows are never
#' dropped (so weights don't have to be subset alongside the rest of the
#' design matrix). If other non-predictor columns are required, use the
#' "roles" features of recipes.
#'
#' It is important to always use the results of `model_frame()` with
#' [model_matrix()] rather than [stats::model.matrix()] because the tibble
#' in the result of `model_frame()` does _not_ have a terms object attached.
#' If `model.matrix(<terms>, <tibble>)` is called directly, then a call to
#' `model.frame()` will be made automatically, which can give faulty results.
#'
#' @return
#'
#' A named list with two elements:
#'
#' - `"data"`: A tibble containing the model frame.
#'
#' - `"terms"`: A terms object containing the terms for the model frame.
#'
#' @examples
#' # ---------------------------------------------------------------------------
#' # Example usage
#'
#' framed <- model_frame(Species ~ Sepal.Width, iris)
#'
#' framed$data
#'
#' framed$terms
#'
#' # ---------------------------------------------------------------------------
#' # Missing values never result in dropped rows
#'
#' iris2 <- iris
#' iris2$Sepal.Width[1] <- NA
#'
#' framed2 <- model_frame(Species ~ Sepal.Width, iris2)
#'
#' head(framed2$data)
#'
#' nrow(framed2$data) == nrow(iris2)
#'
#' @export
#'
model_frame <- function(formula, data) {
# Validate inputs up front; check_is_data_like() also coerces matrices.
validate_is_formula(formula)
data <- check_is_data_like(data)
# Set the na.action option to "na.pass" for the duration of the call so
# model.frame() never drops rows containing missing values.
frame <- rlang::with_options(
stats::model.frame(formula, data = data),
na.action = "na.pass"
)
# Can't simplify terms env here, sometimes we need it to exist
terms <- terms(frame)
# Strip the terms attribute so the data and the terms object are returned
# as two separate pieces rather than travelling together on the tibble.
attr(frame, "terms") <- NULL
data <- tibble::as_tibble(frame)
list(
data = data,
terms = terms
)
}
# Assert that `formula` is a formula, delegating to the shared validate_is()
# helper (which signals an informative error on failure).
validate_is_formula <- function(formula) {
validate_is(formula, rlang::is_formula, "formula")
}
| /R/model-frame.R | no_license | marlycormar/hardhat | R | false | false | 3,321 | r | #' Construct a model frame
#'
#' `model_frame()` is a stricter version of [stats::model.frame()]. There are
#' a number of differences, with the main being that rows are _never_ dropped
#' and the return value is a list with the frame and terms separated into
#' two distinct objects.
#'
#' @param formula A formula or terms object representing the terms of the
#' model frame.
#'
#' @param data A data frame or matrix containing the terms of `formula`.
#'
#' @details
#'
#' The following explains the rationale for some of the difference in arguments
#' compared to [stats::model.frame()]:
#'
#' - `subset`: Not allowed because the number of rows before and after
#' `model_frame()` has been run should always be the same.
#'
#' - `na.action`: Not allowed and is forced to `"na.pass"` because the
#' number of rows before and after `model_frame()` has been run should always
#' be the same.
#'
#' - `drop.unused.levels`: Not allowed because it seems inconsistent for
#' `data` and the result of `model_frame()` to ever have the same factor column
#' but with different levels, unless specified though `original_levels`. If
#' this is required, it should be done through a recipe step explicitly.
#'
#' - `xlev`: Not allowed because this check should have been done ahead of
#' time. Use [scream()] to check the integrity of `data` against a training
#' set if that is required.
#'
#' - `...`: Not exposed because offsets are handled separately, and
#' it is not necessary to pass weights here any more because rows are never
#' dropped (so weights don't have to be subset alongside the rest of the
#' design matrix). If other non-predictor columns are required, use the
#' "roles" features of recipes.
#'
#' It is important to always use the results of `model_frame()` with
#' [model_matrix()] rather than [stats::model.matrix()] because the tibble
#' in the result of `model_frame()` does _not_ have a terms object attached.
#' If `model.matrix(<terms>, <tibble>)` is called directly, then a call to
#' `model.frame()` will be made automatically, which can give faulty results.
#'
#' @return
#'
#' A named list with two elements:
#'
#' - `"data"`: A tibble containing the model frame.
#'
#' - `"terms"`: A terms object containing the terms for the model frame.
#'
#' @examples
#' # ---------------------------------------------------------------------------
#' # Example usage
#'
#' framed <- model_frame(Species ~ Sepal.Width, iris)
#'
#' framed$data
#'
#' framed$terms
#'
#' # ---------------------------------------------------------------------------
#' # Missing values never result in dropped rows
#'
#' iris2 <- iris
#' iris2$Sepal.Width[1] <- NA
#'
#' framed2 <- model_frame(Species ~ Sepal.Width, iris2)
#'
#' head(framed2$data)
#'
#' nrow(framed2$data) == nrow(iris2)
#'
#' @export
#'
model_frame <- function(formula, data) {
# Validate inputs up front; check_is_data_like() also coerces matrices.
validate_is_formula(formula)
data <- check_is_data_like(data)
# Set the na.action option to "na.pass" for the duration of the call so
# model.frame() never drops rows containing missing values.
frame <- rlang::with_options(
stats::model.frame(formula, data = data),
na.action = "na.pass"
)
# Can't simplify terms env here, sometimes we need it to exist
terms <- terms(frame)
# Strip the terms attribute so the data and the terms object are returned
# as two separate pieces rather than travelling together on the tibble.
attr(frame, "terms") <- NULL
data <- tibble::as_tibble(frame)
list(
data = data,
terms = terms
)
}
# Assert that `formula` is an R formula object.
# Delegates to the shared validate_is() helper (defined elsewhere in the
# package), passing rlang::is_formula() as the predicate and "formula" as the
# label used in the error message when the check fails.
validate_is_formula <- function(formula) {
validate_is(formula, rlang::is_formula, "formula")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/node-funs.R
\name{route_nearest_point}
\alias{route_nearest_point}
\title{Find nearest route to a given point}
\usage{
route_nearest_point(r, p, id_out = FALSE)
}
\arguments{
\item{r}{An \code{sf} object containing one or more linestring geometries (the candidate routes)}
\item{p}{A point represented by an \code{sf} object; the route in \code{r} nearest to this point is returned}
\item{id_out}{Should the index of the matching feature be returned? \code{FALSE} by default}
}
\description{
This function was written as a drop-in replacement for \code{sf::st_nearest_feature()},
which only works with recent versions of GEOS.
}
\examples{
r <- routes_fast_sf[2:6, NULL]
p <- sf::st_sfc(sf::st_point(c(-1.540, 53.826)), crs = sf::st_crs(r))
route_nearest_point(r, p, id_out = TRUE)
r_nearest <- route_nearest_point(r, p)
plot(r$geometry)
plot(p, add = TRUE)
plot(r_nearest, lwd = 5, add = TRUE)
}
| /man/route_nearest_point.Rd | permissive | mem48/stplanr | R | false | true | 946 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/node-funs.R
\name{route_nearest_point}
\alias{route_nearest_point}
\title{Find nearest route to a given point}
\usage{
route_nearest_point(r, p, id_out = FALSE)
}
\arguments{
\item{r}{An \code{sf} object containing one or more linestring geometries (the candidate routes)}
\item{p}{A point represented by an \code{sf} object; the route in \code{r} nearest to this point is returned}
\item{id_out}{Should the index of the matching feature be returned? \code{FALSE} by default}
}
\description{
This function was written as a drop-in replacement for \code{sf::st_nearest_feature()},
which only works with recent versions of GEOS.
}
\examples{
r <- routes_fast_sf[2:6, NULL]
p <- sf::st_sfc(sf::st_point(c(-1.540, 53.826)), crs = sf::st_crs(r))
route_nearest_point(r, p, id_out = TRUE)
r_nearest <- route_nearest_point(r, p)
plot(r$geometry)
plot(p, add = TRUE)
plot(r_nearest, lwd = 5, add = TRUE)
}
|
# Install matconv on demand, then attach it. requireNamespace() is the
# correct way to *test* for a package; library() (rather than require())
# is used for the actual attach so that a failed installation stops the
# script with an error instead of silently continuing.
if (!requireNamespace("matconv", quietly = TRUE)) {
  install.packages("matconv")
}
library(matconv)

# MATLAB source for arc_length(), one element per line. The function
# numerically integrates the speed of a polynomial curve (rows of M hold
# polynomial coefficients) between parameters A and B using L = 1000 steps.
# Note the 8th element is a single string containing an embedded newline
# (the MATLAB "..." line continuation) -- keep it intact.
matIn <- c("function value = arc_length(M, A, B)",
" L = 1000;",
" [d, c] = size(M);",
" a = min(A, B);",
" b = max(A, B);",
" delta_t = (b - a)/L;",
" inter_values = a + (0:L-1)*delta_t;",
" power_inter_values = repmat(inter_values, c, 1) ...
.^ repmat((0:c-1)', 1, length(inter_values));",
" derivate_coefficients_with_const = M .* repmat(1:c, d, 1);",
" result = derivate_coefficients_with_const * power_inter_values;",
" value = sum(sqrt(sum(result .* result, 1))) * delta_t;",
"end"
)

# Translate the MATLAB code to R and print the generated source.
mat2r(matIn, verbose = 0)$rCode
| /Code/matconv.R | no_license | EduardaChagas/CDMLHC | R | false | false | 750 | r |
# Install matconv on demand, then attach it. requireNamespace() is the
# correct way to *test* for a package; library() (rather than require())
# is used for the actual attach so that a failed installation stops the
# script with an error instead of silently continuing.
if (!requireNamespace("matconv", quietly = TRUE)) {
  install.packages("matconv")
}
library(matconv)

# MATLAB source for arc_length(), one element per line. The function
# numerically integrates the speed of a polynomial curve (rows of M hold
# polynomial coefficients) between parameters A and B using L = 1000 steps.
# Note the 8th element is a single string containing an embedded newline
# (the MATLAB "..." line continuation) -- keep it intact.
matIn <- c("function value = arc_length(M, A, B)",
" L = 1000;",
" [d, c] = size(M);",
" a = min(A, B);",
" b = max(A, B);",
" delta_t = (b - a)/L;",
" inter_values = a + (0:L-1)*delta_t;",
" power_inter_values = repmat(inter_values, c, 1) ...
.^ repmat((0:c-1)', 1, length(inter_values));",
" derivate_coefficients_with_const = M .* repmat(1:c, d, 1);",
" result = derivate_coefficients_with_const * power_inter_values;",
" value = sum(sqrt(sum(result .* result, 1))) * delta_t;",
"end"
)

# Translate the MATLAB code to R and print the generated source.
mat2r(matIn, verbose = 0)$rCode
|
# Dependencies for the DP latent-class MCMC below:
#   data.table   - fast table construction in twoWay.probs.vectorized()
#   MCMCpack     - ddirichlet()/rdirichlet() priors and proposals
#   foreign      - read.dta() for the Stata input file
#   matrixStats  - rowProds() used by row.prob()
#   Rfast        - rowprods() used by row.prob.vec()
#   parallel     - parApply() over a cluster for the likelihood
library(data.table)
library(plyr)
library(MCMCpack)
library(foreign)
library(matrixStats)
library(Rfast)
library(parallel)
print("Loaded the libraries...")
# Vectorized lookup of class-conditional probabilities.
#
# comb, cols, samp, G.used: equal-length index vectors. Element i of the
#   result is class.probs[comb[i], samp[i], G.used[i], cols[i]], i.e. the
#   probability that variable cols[i] takes level comb[i], for MCMC sample
#   samp[i] and latent class G.used[i].
# class.probs: 4-d array indexed [level, sample, class, variable].
#
# Matrix indexing extracts all requested entries in one subset operation.
getProbs <- function(comb, cols, samp, G.used, class.probs) {
  class.probs[cbind(comb, samp, G.used, cols)]
}
# Summed cell probabilities for a set of full level combinations, for one
# MCMC sample.
#
# combs.full: matrix/data.table, one row per combination, P columns of levels.
# Returns sum over rows and latent classes of
#   pi[sample, g] * prod_p class.probs[level_p, sample, g, p].
row.prob.vec <- function(combs.full, mix.probs, class.probs, P, sample, G) {
  n_comb <- nrow(combs.full)
  # Variable index, sample index and level for every (row, variable) entry,
  # laid out row-major so consecutive entries walk one combination at a time.
  var_idx <- rep(seq_len(P), n_comb)
  samp_idx <- rep(sample, n_comb * ncol(combs.full))
  levels_flat <- c(t(combs.full))
  # One column per latent class. Argument `cols` is now spelled out in full
  # (the previous `col =` relied on partial argument matching).
  class_probs_flat <- sapply(
    seq_len(G),
    FUN = getProbs,
    comb = levels_flat, cols = var_idx, samp = samp_idx, class.probs = class.probs
  )
  # Product over the P variables within each (combination, class) pair.
  cell_probs <- rowprods(matrix(class_probs_flat, ncol = P, byrow = TRUE))
  # Weight by this sample's mixing probabilities and sum everything.
  sum(cell_probs * rep(mix.probs[sample, ], each = length(cell_probs) / G))
}
# Cell probability of a single full level combination for one MCMC sample.
#
# combs: length-P vector; combs[k] is the level of variable k.
# Returns sum over latent classes g of
#   pi[sample, g] * prod_p class.probs[combs[p], sample, g, p].
row.prob <- function(combs, mix.probs, class.probs, P, sample, G) {
  var_idx <- seq_len(P)
  samp <- rep(sample, length(combs))
  # BUGFIX: `class.probs` was not being forwarded to getProbs(), so every
  # call failed with a missing-argument error (the commented-out apply()
  # version this replaced did pass it). `cols` is also spelled out instead
  # of relying on partial matching of `col =`.
  tmp.probs <- rowProds(t(sapply(
    seq_len(G),
    FUN = getProbs,
    comb = combs, cols = var_idx, samp = samp, class.probs = class.probs
  )))
  # Mix the per-class products with this sample's mixing weights.
  prob <- sum(mix.probs[sample, ] * tmp.probs)
  return(prob)
}
# Two-way marginal probability P(X[index1] = val1, X[index2] = val2) implied
# by the latent class model, for one MCMC sample.
#
# counts.vec: optional length-P vector with exactly two non-NA entries; when
#   given, (index1, val1) and (index2, val2) are read off it. This form is
#   what apply()/parApply() pass in when iterating over rows of counts.mat.
# mix.probs:   nsamples x G matrix of mixing weights.
# class.probs: K x nsamples x G x P array of class-conditional probabilities
#   (K levels per variable, P variables).
# sample:      which MCMC iteration's parameters to evaluate.
#
# The marginal is obtained by enumerating all K^(P-2) level combinations of
# the remaining variables and summing their full-cell probabilities, so the
# cost grows exponentially in P.
twoWay.probs.vectorized <- function(counts.vec, mix.probs, class.probs, index1=NULL, index2=NULL, val1=NULL, val2 = NULL, sample = 1, G){
if(is.null(index1)){
# Pull the (variable index, level) pairs out of the two non-NA cells.
inds = which(!is.na(counts.vec))
index1 = inds[1]
index2 = inds[2]
val1 = counts.vec[inds[1]]
val2 = counts.vec[inds[2]]
}
##Find all possible combinations
P = dim(class.probs)[4]
###First, we need to find all combinations to sum over
# p.opt: indices of the P - 2 variables NOT fixed by the two-way cell.
p.opt = seq(1,P)[!(seq(1,P) %in% c(index1,index2))]
K = dim(class.probs)[1]
# Cartesian product of the K levels over the free variables: K^(P-2) rows.
tmp.list = rep(list(seq(1,K)), length(p.opt))
combs = expand.grid(tmp.list)
names(combs) = p.opt
vec1 = rep(val1, nrow(combs))
vec2 = rep(val2, nrow(combs))
##Splice in these vectors at the correct place
# Build the full P-column combination matrix: the two fixed columns are
# filled first, then the enumerated combinations are copied, in order, into
# whichever columns are still NA.
combs.full = matrix(NA, nrow = nrow(combs), ncol = ncol(combs) + 2)
combs.full[,index1] = vec1
combs.full[,index2] = vec2
tmp.counter = 1
for(i in 1:ncol(combs.full)){
if(is.na(combs.full[1,i])){
combs.full[,i] = combs[,tmp.counter]
tmp.counter = tmp.counter + 1}
}
combs.full = data.table(combs.full)
# Sum the cell probabilities of every completed combination.
prob = row.prob.vec(combs.full, mix.probs, class.probs, P, sample, G)
return(prob)
}#end of function
# Probability of one full contingency-table cell under the latent class model.
#
# mix.probs:   nsamples x G matrix of mixing weights.
# class.probs: list indexed by level; element k is an nsamples x G x P array
#   giving P(variable p takes level k | class g) at each MCMC sample.
# vals:        length-P vector; vals[p] is the level of variable p.
# sample:      MCMC iteration to evaluate. NULL averages the parameters over
#   all retained samples first (posterior-mean plug-in).
#
# Returns a single probability. Signals an error (instead of silently
# returning a character string, as before) when vals has the wrong length --
# the old string return was type-unstable and would coerce any numeric
# matrix it was assigned into to character.
fullTab.probs <- function(mix.probs, class.probs, vals, sample = NULL) {
  if (length(vals) != dim(class.probs[[1]])[3]) {
    stop("Evaluated combination is of the wrong length!!", call. = FALSE)
  }
  if (is.null(sample)) {
    # Posterior-mean version: average weights and class probabilities over
    # samples, then evaluate the mixture once.
    mix.avg <- apply(mix.probs, 2, "mean")
    class.avg <- lapply(class.probs, function(arr) apply(arr, c(2, 3), "mean"))
    prob.vec <- rep(1, length(mix.avg))
    for (i in seq_along(vals)) {
      prob.vec <- prob.vec * class.avg[[vals[i]]][, i]
    }
    prob <- sum(mix.avg * prob.vec)
  } else {
    # Single-sample version: per-class product over variables, then mix.
    prob.vec <- rep(1, ncol(mix.probs))
    for (i in seq_along(vals)) {
      prob.vec <- prob.vec * class.probs[[vals[i]]][sample, , i]
    }
    prob <- sum(mix.probs[sample, ] * prob.vec)
  }
  prob
}
# Log-kernel of the two-sided geometric ("discrete Laplace") noise linking
# observed noisy counts to candidate true counts.
#
# data, prop: equal-length numeric vectors of counts.
# E:          log of the geometric decay parameter alpha = exp(E), E < 0.
#
# Returns sum_i |data_i - prop_i| * E (the log-density up to an additive
# constant). Since log(exp(E)) == E, the old per-element alpha/log round
# trip is unnecessary; the vectorized form also returns the correct value
# (0) for zero-length input, where the previous 1:length(data) loop broke.
lap.lik <- function(data, prop, E = -1/6) {
  E * sum(abs(data - prop))
}
# Log-pmf of the truncated two-sided geometric mechanism.
#
# noisyS:    observed noisy counts, truncated to [0, samp.size].
# S:         candidate true counts.
# samp.size: upper truncation point (the sample size).
# E:         log of the geometric parameter alpha = exp(E).
#
# Boundary cells (0 and samp.size) absorb the truncated tail mass, so they
# drop the (1 - alpha) interior normalizer; the top boundary uses the signed
# distance samp.size - S, matching the original branch structure (a value
# equal to both 0 and samp.size takes the 0 branch).
#
# Computation is done directly in log space -- d * E - log1p(alpha) -- which
# avoids the alpha^d underflow (and resulting -Inf from log()) that the old
# elementwise formulation produced for large distances d; the old
# 1:length(noisyS) loop also misbehaved on zero-length input.
dT2Lap <- function(noisyS, S, samp.size, E) {
  alpha <- exp(E)
  # Default: the "abs distance" form shared by interior and zero-boundary.
  logp <- abs(noisyS - S) * E - log1p(alpha)
  # Top truncation point (unless it coincides with 0): signed distance.
  at.top <- noisyS == samp.size & noisyS != 0
  logp[at.top] <- (samp.size - S[at.top]) * E - log1p(alpha)
  # Interior points carry the (1 - alpha) normalizer.
  interior <- noisyS != 0 & noisyS != samp.size
  logp[interior] <- logp[interior] + log(1 - alpha)
  sum(logp)
}
# Binomial log-likelihood of two-way marginal counts under the latent class
# model (serial version).
#
# counts:     observed counts, one per row of counts.mat (or a single count
#             when `index` is given).
# counts.mat: one row per two-way cell; exactly two non-NA entries per row
#             give the levels of the two variables defining that cell.
# sample:     which MCMC iteration's parameters (mix.probs/comp.probs) to use.
# index:      when non-NULL, evaluate only the cell counts.mat[index, ] --
#             used by the one-coordinate Metropolis updates of M.
#
# Returns sum over cells of dbinom(count, samp.size, cell probability, log).
#
# NOTE(review): this calls twoWay.probs(), which is NOT defined anywhere in
# this script (only twoWay.probs.vectorized() is). Confirm it is sourced
# from elsewhere before using this function or mcmc.sampler(), which depends
# on it.
likelihood <- function(counts,samp.size, G, sample, counts.mat, mix.probs, comp.probs, index = NULL){
probs = rep(NA, length(counts))
if(is.null(index)){
# Full pass: one model-implied probability per two-way cell.
for(y in 1:nrow(counts.mat)){
inds = which(!is.na(counts.mat[y,]))
val1 = counts.mat[y, inds[1]]
val2 = counts.mat[y, inds[2]]
probs[y] = twoWay.probs(mix.probs, comp.probs, index1 = inds[1], index2 = inds[2], val1, val2, sample = sample)
}##End of for
} else{
# Single-cell shortcut for coordinate-wise updates.
inds = which(!is.na(counts.mat[index,]))
val1 = counts.mat[index, inds[1]]
val2 = counts.mat[index, inds[2]]
probs = twoWay.probs(mix.probs, comp.probs, index1 = inds[1], index2 = inds[2], val1, val2, sample = sample)
}
# T is TRUE here: accumulate the binomial likelihood on the log scale.
lik = sum(dbinom(counts,samp.size, probs,log=T))
return(lik)
}#end of function
# Binomial log-likelihood of two-way marginal counts, built on
# twoWay.probs.vectorized(); optionally parallelized over a cluster.
#
# Arguments mirror likelihood(); additionally:
#   par:   when TRUE, evaluate all rows of counts.mat with parApply() on
#          the cluster `clust` instead of a serial loop.
#   clust: parallel cluster object.
#
# NOTE(review): the default clust = cl resolves a GLOBAL object `cl` at call
# time, and no cluster named `cl` is created anywhere in this script --
# confirm a parallel::makeCluster() result called `cl` exists before calling
# with par = TRUE (or before relying on the default).
likelihood.vec <- function(counts,samp.size, G, sample, counts.mat, mix.probs, comp.probs, index = NULL, par = FALSE, clust = cl){
probs = rep(NA, length(counts))
if(is.null(index)){
if(par == TRUE){
# Each row of counts.mat is passed as counts.vec; the two non-NA entries
# identify the cell.
probs = parApply(cl = clust, counts.mat,MARGIN = 1,FUN = twoWay.probs.vectorized, mix.probs= mix.probs, class.probs = comp.probs, sample = sample, G=G)
} else{
# Serial fallback: one cell at a time.
for(y in 1:nrow(counts.mat)){
inds = which(!is.na(counts.mat[y,]))
val1 = counts.mat[y, inds[1]]
val2 = counts.mat[y, inds[2]]
probs[y] = twoWay.probs.vectorized(counts.vec = NULL, mix.probs, comp.probs, index1 = inds[1], index2 = inds[2], val1, val2, sample = sample, G=G)
}##End of for
}
} else{
# Single-cell shortcut for coordinate-wise Metropolis updates of M.
inds = which(!is.na(counts.mat[index,]))
val1 = counts.mat[index, inds[1]]
val2 = counts.mat[index, inds[2]]
probs = twoWay.probs.vectorized(counts.vec = NULL, mix.probs, comp.probs, index1 = inds[1], index2 = inds[2], val1, val2, sample = sample, G=G)
}
# T is TRUE here: accumulate the binomial likelihood on the log scale.
lik = sum(dbinom(counts,samp.size, probs,log=T))
return(lik)
}#end of function
###Sampling function
# Metropolis-within-Gibbs sampler (v1) for a latent class model fit to
# two-way marginal counts released under differential privacy via two-sided
# geometric noise.
#
# counts:     true two-way marginal counts, one per row of counts.mat.
# counts.mat: one row per cell; two non-NA entries give the defining levels.
# eps:        total privacy budget, split over Q tables and halved into the
#             geometric log-parameter E.
# P, G:       number of variables / latent classes.
# nsamples:   MCMC iterations; samp.size: number of underlying records.
# E.var:      optional proposal log-parameter for the M update (defaults E).
#
# Returns a list of sampled marginal probabilities, latent true counts M,
# class-conditional probabilities Psi (list format), mixing weights Pi, and
# acceptance counters for each block.
#
# NOTE(review): reads the GLOBAL `comb.mat` and (via likelihood()) calls
# twoWay.probs(), neither defined in this script -- confirm both are sourced
# elsewhere. Also, in the mixing-weight update below, prop.post is evaluated
# at the CURRENT weights and past.post at the PROPOSED weights, which looks
# inverted relative to a standard MH ratio (the Psi update here and v3's
# Dirichlet branch use the opposite orientation) -- confirm intent.
mcmc.sampler <- function(counts, counts.mat, eps = 1, P, G = 5, nsamples = 2000, samp.size, E.var = NULL){
###1.) Create noisy counts
# NOTE(review): length(unique(!is.na(...))) can only be 1 or 2 (it counts
# distinct logicals), so Q is length(counts)/4 in practice -- confirm this
# matches the intended number of released tables.
Q = length(counts)/(length(unique(!is.na(c(comb.mat))))^2)
E = -(eps/Q)/2
if(is.null(E.var)){E.var = E}
errs = rgeom(length(counts),1-exp(E)) - rgeom(length(counts),1-exp(E))
noisyM = counts + errs
###Initialize the starting values
##Mixing probabilities
mix.probs = matrix(NA, nrow=nsamples, ncol = G)
mix.probs[1,] = rep(1/G,G) ##Starting value: equal weights
###Counts
# Start M at a re-noised copy of noisyM, clamped to [0, samp.size].
M = matrix(NA, nrow = nsamples, ncol = length(noisyM))
M[1,] = noisyM + rgeom(length(counts),1-exp(E)) - rgeom(length(counts),1-exp(E))
M[1,] = ifelse(M[1,] < 0, 0, M[1,])
M[1,] = ifelse(M[1,] > samp.size, samp.size, M[1,])
###Psi
###Currently only implemented for two binary variables
comp.probs = list()
comp.probs[[1]] = array(NA, dim = c(nsamples, G, P))
##Random start
# Moment-style start: average noisy proportions for cells where each
# variable takes level 1 (all classes start identical).
start.val = cbind(comb.mat, noisyM/samp.size)
tmp.mat = matrix(NA, nrow = G, ncol = P)
for(i in 1:ncol(tmp.mat)){
tmp.mat[,i] = mean(start.val[which(start.val[,i] == 1),ncol(tmp.mat) + 1])
}
comp.probs[[1]][1,,] = tmp.mat
comp.probs[[2]] = 1 - comp.probs[[1]]
##Marginal probabilities
marg.prob = matrix(NA, nsamples, length(noisyM))
##Full probabilities
full.prob = matrix(NA, nsamples, 2^P)
tmp.list = rep(list(seq(1,2)), P)
full.mat = expand.grid(tmp.list)
full.mat = as.matrix(full.mat)
accept = matrix(0,nrow=G, ncol=P)
accept.pi = 0
accept.M = rep(0, length(noisyM))
for(i in 2:nsamples){
##Sample for M
##Proposal
# One-coordinate MH: propose M[q] by re-noising noisyM[q]; accept via the
# Laplace measurement term + single-cell binomial likelihood, with the
# truncated-proposal correction ppropos from dT2Lap().
for(q in 1:length(noisyM)){
M.prop = M[i,]
M.prop[q] = noisyM[q] + rgeom(1,1-exp(E.var)) - rgeom(1,1-exp(E.var))
if(q != length(noisyM)){M.prop[(q+1):length(noisyM)] = M[(i-1),(q+1):length(noisyM)]}
if(M.prop[q]< 0 | M.prop[q] > samp.size){
r = -Inf
}else{
prop.post = lap.lik(M.prop[q],noisyM[q], E=E) + likelihood(M.prop[q], samp.size, G, i-1, counts.mat, mix.probs, comp.probs, index = q)
past.post = lap.lik(M[(i-1),q], noisyM[q], E=E) + likelihood(M[(i-1),q],samp.size, G, i-1, counts.mat, mix.probs, comp.probs, index = q)
ppropos = dT2Lap(M[(i-1),q],M.prop[q],samp.size,E) - dT2Lap(M.prop[q],M[(i-1),q],samp.size,E)
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
M[i,q] = M.prop[q]
accept.M[q] = accept.M[q] + 1
}else{
M[i,q] = M[(i-1),q]
}
# Verbose progress: prints every coordinate index.
print(q)
}
##Sample for mixing probabilities
# Random-walk proposal on the simplex (add noise, renormalize); rejected
# outright if any weight goes negative.
prop.mix = mix.probs
prop.mix[(i-1),] = mix.probs[(i-1),] + rnorm(G,0,sd = 0.005)
prop.mix[(i-1),] = prop.mix[(i-1),]/sum(prop.mix[(i-1),])
if(min(prop.mix[(i-1),]) < 0){
r = -Inf
}else{
# NOTE(review): prop.post uses the current mix.probs and past.post the
# proposal prop.mix -- orientation looks swapped; see header note.
prop.post = likelihood(M[i,], samp.size, G, sample = i-1, counts.mat, mix.probs, comp.probs) + log(ddirichlet(mix.probs[(i-1),], alpha = rep(1/G,G)))
past.post = likelihood(M[i,], samp.size, G, sample = i-1, counts.mat, prop.mix, comp.probs) + log(ddirichlet(prop.mix[(i-1),], alpha = rep(1/G,G)))
##ppropos = log(ddirichlet(mix.probs[(i-1),], alpha = prop.mix[(i-1),])) - log(ddirichlet(prop.mix[(i-1),], alpha = mix.probs[(i-1),]))
ppropos = 0
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
mix.probs[i,] = prop.mix[(i-1),]
accept.pi = accept.pi + 1
}else{
mix.probs[i,] = mix.probs[(i-1),]
}
##For each latent class and variable, sample the component probs
# One (class h, variable k) pair at a time: perturb the 2-vector of level
# probabilities, renormalize, and evaluate with the mixed old/new state
# (prop.probs row 2 holds current values, prop.tmp.probs the proposal).
for(k in 1:P){
for(h in 1:G){
##tmp.prop.all = rdirichlet(1, alpha = 500*c(comp.probs[[1]][1,h,k], comp.probs[[2]][1,h,k]) + 1)#c(comp.probs[[1]][1,h,k], comp.probs[[2]][1,h,k]) + rnorm(2,0,0.1)
tmp.prop.all = c(comp.probs[[1]][(i-1),h,k], comp.probs[[2]][(i-1),h,k]) + rnorm(2,0,0.01)
tmp.prop.all = tmp.prop.all / sum(tmp.prop.all)
if(min(tmp.prop.all) < 0 | max(tmp.prop.all) > 1){
r = -Inf
} else{
tmp.prop = tmp.prop.all[1]
prop.probs = list()
prop.probs[[1]] = comp.probs[[1]][(i-1):i,,]
# Row 2 (iteration i) is still NA for not-yet-updated entries; fill
# them from row 1 so the likelihood sees the current state.
prop.probs[[1]][2,,] = ifelse(is.na(prop.probs[[1]][2,,]), prop.probs[[1]][1,,], prop.probs[[1]][2,,])
prop.probs[[2]] = 1-prop.probs[[1]]
prop.tmp.probs = prop.probs
prop.tmp.probs[[1]][2,h,k] = tmp.prop
prop.tmp.probs[[2]] = 1 - prop.tmp.probs[[1]]
mix.probs.hold = rbind(mix.probs[1,], mix.probs[i,])
prop.post = likelihood(M[i,],samp.size,G, sample = 2, counts.mat, mix.probs.hold, prop.tmp.probs) + log(ddirichlet(c(tmp.prop, 1-tmp.prop),alpha = rep(.5,2)))
past.post = likelihood(M[i,],samp.size,G, sample = 2, counts.mat, mix.probs.hold, prop.probs) + log(ddirichlet(c(comp.probs[[1]][i-1,h,k], comp.probs[[2]][i-1,h,k]),alpha = rep(.5,2)))
##ppropos = log(ddirichlet(c(comp.probs[[1]][i-1,h,k], comp.probs[[2]][i-1,h,k]),alpha = tmp.prop.all)) - log(ddirichlet(tmp.prop.all,alpha = c(comp.probs[[1]][i-1,h,k], comp.probs[[2]][i-1,h,k])))
ppropos = 0
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
comp.probs[[1]][i,h,k] = tmp.prop
accept[h,k] = accept[h,k] + 1
}else{
comp.probs[[1]][i,h,k] = comp.probs[[1]][i-1,h,k]
}
comp.probs[[2]] = 1 - comp.probs[[1]]
}##End of for
}##End of for
###Calculate marginal probability for sample
# Model-implied two-way probabilities at the freshly drawn parameters.
for(y in 1:nrow(counts.mat)){
inds = which(!is.na(counts.mat[y,]))
val1 = counts.mat[y, inds[1]]
val2 = counts.mat[y, inds[2]]
marg.prob[i,y] = twoWay.probs(mix.probs, comp.probs, index1 = inds[1], index2 = inds[2], val1, val2, sample = i)
}##End of for
###Calculate full probability for sample
#for(z in 1:nrow(full.mat)){
# full.prob[i,z] = fullTab.probs(mix.probs, comp.probs, vals = full.mat[z,], sample = i)
#}
print(i)
}##End of for
# NOTE(review): full_probs is returned all-NA because its computation is
# commented out above.
return(list("marg_probs" = marg.prob, "full_probs" = full.prob, "M" = M, "Psi" = comp.probs, "Pi" = mix.probs, "accept_rate_pi" = accept.pi, "accept_rate_psi" = accept, "accept_rate_M" = accept.M))
}##End of function
###Sampling function
# Metropolis-within-Gibbs sampler (v2): like mcmc.sampler() but stores Psi
# as a single 4-d array (2 x nsamples x G x P) and uses the vectorized /
# parallel likelihood.vec().
#
# NOTE(review): v2 caches `past.post` to avoid recomputation, but the cached
# value is carried across DIFFERENT coordinates and parameter blocks:
#   * in the M update, past.post is only computed for q == 1 and then reused
#     for q > 1, whose lap.lik/likelihood terms refer to a different count;
#   * the Psi update's first (k, h) comparison reuses past.post left over
#     from the mixing-weight step (a different posterior, different prior).
# mcmc.sampler.v3 recomputes these correctly -- prefer it; confirm before
# trusting v2 output. Also: the mixing-weight step evaluates prop.post at
# the CURRENT weights and past.post at the PROPOSAL (orientation looks
# swapped), and `comb.mat` plus the cluster `cl` are globals not defined in
# this script.
mcmc.sampler.v2 <- function(counts, counts.mat, eps = 1, P, G = 5, nsamples = 2000, samp.size, E.var = NULL){
###1.) Create noisy counts
Q = length(counts)/(length(unique(!is.na(c(comb.mat))))^2)
E = -(eps/Q)/2
if(is.null(E.var)){E.var = E}
errs = rgeom(length(counts),1-exp(E)) - rgeom(length(counts),1-exp(E))
noisyM = counts + errs
###Initialize the starting values
##Mixing probabilities
mix.probs = matrix(NA, nrow=nsamples, ncol = G)
mix.probs[1,] = rep(1/G,G) ##Starting value: equal weights
###Counts
# Start M at a re-noised copy of noisyM, clamped to [0, samp.size].
M = matrix(NA, nrow = nsamples, ncol = length(noisyM))
M[1,] = noisyM + rgeom(length(counts),1-exp(E)) - rgeom(length(counts),1-exp(E))
M[1,] = ifelse(M[1,] < 0, 0, M[1,])
M[1,] = ifelse(M[1,] > samp.size, samp.size, M[1,])
###Psi
###Currently only implemented for two binary variables
# Dimension 1 indexes the two levels; slice 2 is always 1 - slice 1.
comp.probs = array(NA, dim = c(2,nsamples,G,P))
##Random start
start.val = cbind(comb.mat, noisyM/samp.size)
tmp.mat = matrix(NA, nrow = G, ncol = P)
for(i in 1:ncol(tmp.mat)){
tmp.mat[,i] = mean(start.val[which(start.val[,i] == 1),ncol(tmp.mat) + 1])
}
comp.probs[1,1,,] = tmp.mat
comp.probs[2,,,] = 1 - comp.probs[1,,,]
##Marginal probabilities
marg.prob = matrix(NA, nsamples, length(noisyM))
##Full probabilities
full.prob = matrix(NA, nsamples, 2^P)
tmp.list = rep(list(seq(1,2)), P)
full.mat = expand.grid(tmp.list)
full.mat = as.matrix(full.mat)
accept = matrix(0,nrow=G, ncol=P)
accept.pi = 0
accept.M = rep(0, length(noisyM))
for(i in 2:nsamples){
##Sample for M
##Proposal
for(q in 1:length(noisyM)){
M.prop = M[i,]
M.prop[q] = noisyM[q] + rgeom(1,1-exp(E.var)) - rgeom(1,1-exp(E.var))
if(q != length(noisyM)){M.prop[(q+1):length(noisyM)] = M[(i-1),(q+1):length(noisyM)]}
if(M.prop[q]< 0 | M.prop[q] > samp.size){
r = -Inf
}else{
prop.post = lap.lik(M.prop[q],noisyM[q], E=E) + likelihood.vec(M.prop[q], samp.size, G, i-1, counts.mat, mix.probs, comp.probs, index = q)
# NOTE(review): past.post computed only for q == 1 and reused for all
# later coordinates -- see header note on stale caching.
if(q == 1){
past.post = lap.lik(M[(i-1),q], noisyM[q], E=E) + likelihood.vec(M[(i-1),q],samp.size, G, i-1, counts.mat, mix.probs, comp.probs, index = q)
}
ppropos = dT2Lap(M[(i-1),q],M.prop[q],samp.size,E) - dT2Lap(M.prop[q],M[(i-1),q],samp.size,E)
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
M[i,q] = M.prop[q]
accept.M[q] = accept.M[q] + 1
past.post = prop.post
}else{
M[i,q] = M[(i-1),q]
past.post = past.post
}
print(q)
}
##Sample for mixing probabilities
# Random-walk proposal on the simplex (add noise, renormalize).
prop.mix = mix.probs
prop.mix[(i-1),] = mix.probs[(i-1),] + rnorm(G,0,sd = 0.005)
prop.mix[(i-1),] = prop.mix[(i-1),]/sum(prop.mix[(i-1),])
if(min(prop.mix[(i-1),]) < 0){
r = -Inf
}else{
# NOTE(review): prop.post evaluated at current weights, past.post at the
# proposal -- orientation looks swapped; see header note.
prop.post = likelihood.vec(M[i,], samp.size, G, sample = i-1, counts.mat, mix.probs, comp.probs, par = TRUE) + log(ddirichlet(mix.probs[(i-1),], alpha = rep(1/G,G)))
past.post = likelihood.vec(M[i,], samp.size, G, sample = i-1, counts.mat, prop.mix, comp.probs) + log(ddirichlet(prop.mix[(i-1),], alpha = rep(1/G,G)))
##ppropos = log(ddirichlet(mix.probs[(i-1),], alpha = prop.mix[(i-1),])) - log(ddirichlet(prop.mix[(i-1),], alpha = mix.probs[(i-1),]))
ppropos = 0
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
mix.probs[i,] = prop.mix[(i-1),]
accept.pi = accept.pi + 1
past.post = prop.post
}else{
mix.probs[i,] = mix.probs[(i-1),]
past.post = past.post
}
##For each latent class and variable, sample the component probs
# NOTE(review): the first comparison below reuses past.post carried over
# from the mixing-weight step -- see header note on stale caching.
for(k in 1:P){
for(h in 1:G){
##tmp.prop.all = rdirichlet(1, alpha = 500*c(comp.probs[[1]][1,h,k], comp.probs[[2]][1,h,k]) + 1)#c(comp.probs[[1]][1,h,k], comp.probs[[2]][1,h,k]) + rnorm(2,0,0.1)
tmp.prop.all = c(comp.probs[1,(i-1),h,k], comp.probs[2,(i-1),h,k]) + rnorm(2,0,0.01)
tmp.prop.all = tmp.prop.all / sum(tmp.prop.all)
if(min(tmp.prop.all) < 0 | max(tmp.prop.all) > 1){
r = -Inf
} else{
tmp.prop = tmp.prop.all[1]
prop.probs = comp.probs[,(i-1):i,,]
# Fill still-NA iteration-i entries from iteration i-1 so the
# likelihood sees the current state.
prop.probs[1,2,,] = ifelse(is.na(prop.probs[1,2,,]), prop.probs[1,1,,], prop.probs[1,2,,])
prop.probs[2,,,] = 1-prop.probs[1,,,]
prop.tmp.probs = prop.probs
prop.tmp.probs[1,2,h,k] = tmp.prop
prop.tmp.probs[2,,,] = 1 - prop.tmp.probs[1,,,]
mix.probs.hold = rbind(mix.probs[1,], mix.probs[i,])
start = Sys.time()
prop.post = likelihood.vec(M[i,],samp.size,G, sample = 2, counts.mat, mix.probs = mix.probs.hold, comp.probs =prop.tmp.probs, par = TRUE) + log(ddirichlet(c(tmp.prop, 1-tmp.prop),alpha = rep(.5,2)))
end = Sys.time()
##past.post = likelihood.vec(M[i,],samp.size,G, sample = 2, counts.mat, mix.probs = mix.probs.hold, comp.probs = prop.probs) + log(ddirichlet(c(comp.probs[1,i-1,h,k], comp.probs[2,i-1,h,k]),alpha = rep(.5,2)))
##ppropos = log(ddirichlet(c(comp.probs[[1]][i-1,h,k], comp.probs[[2]][i-1,h,k]),alpha = tmp.prop.all)) - log(ddirichlet(tmp.prop.all,alpha = c(comp.probs[[1]][i-1,h,k], comp.probs[[2]][i-1,h,k])))
ppropos = 0
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
comp.probs[1,i,h,k] = tmp.prop
accept[h,k] = accept[h,k] + 1
past.post = prop.post
}else{
comp.probs[1,i,h,k] = comp.probs[1,i-1,h,k]
past.post = past.post
}
comp.probs[2,,,] = 1 - comp.probs[1,,,]
}##End of for
}##End of for
###Calculate marginal probability for sample
# Model-implied two-way probabilities at the freshly drawn parameters,
# evaluated in parallel on the global cluster `cl`.
marg.prob[i,] = parApply(cl = cl, counts.mat,MARGIN = 1,FUN = twoWay.probs.vectorized, mix.probs= mix.probs, class.probs = comp.probs, sample = i, G=G)
###Calculate full probability for sample
#for(z in 1:nrow(full.mat)){
# full.prob[i,z] = fullTab.probs(mix.probs, comp.probs, vals = full.mat[z,], sample = i)
#}
print(i)
}##End of for
return(list("marg_probs" = marg.prob, "M" = M, "Psi" = comp.probs, "Pi" = mix.probs, "accept_rate_pi" = accept.pi, "accept_rate_psi" = accept, "accept_rate_M" = accept.M))
}##End of function
# Metropolis-within-Gibbs sampler (v3): current version. Differences from
# v2: warm start from a saved run, a per-class BLOCKED Psi update (all P
# variables of one class at a time, with Dirichlet proposals and a proper
# proposal correction), a selectable mixing-weight update (.methodPi =
# "dirichlet" uses a Dirichlet proposal with correction; anything else uses
# a Gaussian random walk), and correct per-coordinate recomputation of
# past.post in the M step.
#
# results.dir: directory containing "0.5_iter100b.Rdata", whose `samps`
#   object (a previous run's output) supplies the starting values
#   (iteration 4000 of Pi and Psi) -- the file name is hard-coded.
#
# NOTE(review):
#   * reads the GLOBALS `comb.mat` and cluster `cl`, neither defined here;
#   * `cl=cl` in the "normal" Pi branch partially matches likelihood.vec's
#     `clust` argument (par stays FALSE, so it is effectively unused);
#   * in the "normal" Pi branch, prop.post uses the current weights and
#     past.post the proposal -- orientation looks swapped relative to the
#     dirichlet branch; confirm intent;
#   * full_probs is returned all-NA (its computation is commented out);
#   * if the first class's Psi proposal is rejected at the r = -Inf guard,
#     `bin.lik` is never initialized for that sweep -- confirm this path
#     cannot occur in practice.
mcmc.sampler.v3 <- function(counts, counts.mat, eps = 1, P, G = 5, nsamples = 2000, samp.size, E.var = NULL, results.dir, .methodPi = "normal"){
###1.) Create noisy counts
Q = length(counts)/(length(unique(!is.na(c(comb.mat))))^2)
E = -(eps/Q)/2
if(is.null(E.var)){E.var = E}
errs = rgeom(length(counts),1-exp(E)) - rgeom(length(counts),1-exp(E))
noisyM = counts + errs
###Initialize the starting values
##Mixing probabilities
# Warm start: load `samps` from a previous run saved under results.dir.
load(paste0(results.dir, "/0.5_iter100b.Rdata"))
mix.probs = matrix(NA, nrow=nsamples, ncol = G)
mix.probs[1,] = samps$Pi[4000,] ##rdirichlet(1,rep(1/G,G)) ##Starting value: equal weights
###Counts
M = matrix(NA, nrow = nsamples, ncol = length(noisyM))
M[1,] = noisyM + rgeom(length(counts),1-exp(E)) - rgeom(length(counts),1-exp(E))
M[1,] = ifelse(M[1,] < 0, 0, M[1,])
M[1,] = ifelse(M[1,] > samp.size, samp.size, M[1,])
###Psi
###Currently only implemented for two binary variables
comp.probs = array(NA, dim = c(2,nsamples,G,P))
##Random start
#start.val = cbind(comb.mat, noisyM/samp.size, noisyM)
#tmp.mat = matrix(NA, nrow = G, ncol = P)
#for(i in 1:ncol(tmp.mat)){
# nrow.starts = nrow(start.val[which(start.val[,i] == 1),])
# tmp.mat[,i] = sum(start.val[which(start.val[,i] == 1),ncol(tmp.mat) + 2])/(samp.size*nrow.starts/2)
#
#}
comp.probs[1,1,,] = samps$Psi[1,4000,,]
comp.probs[2,,,] = 1 - comp.probs[1,,,]
##Marginal probabilities
marg.prob = matrix(NA, nsamples, length(noisyM))
##Full probabilities
full.prob = matrix(NA, nsamples, 2^P)
tmp.list = rep(list(seq(1,2)), P)
full.mat = expand.grid(tmp.list)
full.mat = as.matrix(full.mat)
accept = rep(0,G)
accept.pi = 0
accept.M = rep(0, length(noisyM))
for(i in 2:nsamples){
##Sample for M
##Proposal
# One-coordinate MH on the true counts; unlike v2, both prop.post and
# past.post are recomputed for every coordinate q.
for(q in 1:length(noisyM)){
M.prop = M[(i-1),]
M.prop[q] = noisyM[q] + rgeom(1,1-exp(E.var)) - rgeom(1,1-exp(E.var))
if(q != length(noisyM)){M.prop[(q+1):length(noisyM)] = M[(i-1),(q+1):length(noisyM)]}
if(M.prop[q]< 0 | M.prop[q] > samp.size){
r = -Inf
}else{
bin.lik.new = likelihood.vec(M.prop[q], samp.size, G, i-1, counts.mat, mix.probs, comp.probs, index = q)
prop.post = lap.lik(M.prop[q],noisyM[q], E=E) + bin.lik.new
bin.lik= likelihood.vec(M[(i-1),q],samp.size, G, i-1, counts.mat, mix.probs, comp.probs, index = q)
past.post = lap.lik(M[(i-1),q], noisyM[q], E=E) + bin.lik
ppropos = dT2Lap(M[(i-1),q],M.prop[q],samp.size,E) - dT2Lap(M.prop[q],M[(i-1),q],samp.size,E)
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
M[i,q] = M.prop[q]
accept.M[q] = accept.M[q] + 1
} else{
M[i,q] = M[(i-1),q]
}
}
##Sample for mixing probabilities
if(.methodPi == "dirichlet"){
# Dirichlet proposal centered near the current weights, with the proper
# Metropolis-Hastings proposal correction in ppropos.
prop.mix = mix.probs
prop.mix[(i-1),] = rdirichlet(1, alpha = 50 * mix.probs[(i-1),] + .5) ###50 * mix.probs[(i-1),] + 1
if(min(prop.mix[(i-1),]) < 0){
r = -Inf
}else{
prop.post = likelihood.vec(M[i,], samp.size, G, sample = i-1, counts.mat, prop.mix, comp.probs, par = TRUE) + log(ddirichlet(prop.mix[(i-1),], alpha = rep(1/G,G)))
past.post = likelihood.vec(M[i,], samp.size, G, sample = i-1, counts.mat, mix.probs, comp.probs, par = TRUE) + log(ddirichlet(mix.probs[(i-1),], alpha = rep(1/G,G)))
ppropos = log(ddirichlet(mix.probs[(i-1),], alpha = 50 * prop.mix[(i-1),] + .5)) - log(ddirichlet(prop.mix[(i-1),], alpha = 50 * mix.probs[(i-1),] +.5))
##ppropos = log(ddirichlet(mix.probs[(i-1),], alpha = rep(1/G, G))) - log(ddirichlet(prop.mix[(i-1),], alpha = rep(1/G,G)))
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
mix.probs[i,] = prop.mix[(i-1),]
accept.pi = accept.pi + 1
}else{
mix.probs[i,] = mix.probs[(i-1),]
}
} else{
# Gaussian random walk on the simplex with a Gamma(1/G, 1) prior on the
# unnormalized weights.
prop.mix = mix.probs
eta = mix.probs[(i-1),] + rnorm(G,0,sd = 0.01)
prop.mix[(i-1),] = eta
prop.mix[(i-1),] = prop.mix[(i-1),]/sum(prop.mix[(i-1),])
if(min(prop.mix[(i-1),]) < 0){
r = -Inf
}else{
# NOTE(review): prop.post uses current weights, past.post the proposal
# -- orientation looks swapped relative to the dirichlet branch above.
prop.post = likelihood.vec(M[i,], samp.size, G, sample = i-1, counts.mat, mix.probs, comp.probs, cl=cl) + sum(dgamma(mix.probs[(i-1),], shape = 1/G, scale = 1, log = TRUE))
past.post = likelihood.vec(M[i,], samp.size, G, sample = i-1, counts.mat, prop.mix, comp.probs, cl=cl) + sum(dgamma(prop.mix[(i-1),], shape = 1/G, scale = 1, log = TRUE))
ppropos = 0
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
mix.probs[i,] = prop.mix[(i-1),]
accept.pi = accept.pi + 1
}else{
mix.probs[i,] = mix.probs[(i-1),]
}
}
##For each latent class and variable, sample the component probs
# Blocked Psi update: for class h, propose new level probabilities for ALL
# P variables at once via independent Dirichlet proposals (one per
# variable), with the matching proposal correction accumulated in ppropos.
# bin.lik tracks the likelihood of the current state across classes and is
# refreshed on acceptance.
for(h in 1:G){
tmp.prop.all = matrix(NA, nrow = 2, ncol = P)
for(k in 1:P){
tmp.prop.all[,k] = rdirichlet(1, alpha = 500 * c(comp.probs[1,(i-1),h,k], comp.probs[2,(i-1),h,k])+ 1)
}
##tmp.prop.all = rdirichlet(1, alpha = 5*c(comp.probs[[1]][1,h,k], comp.probs[[2]][1,h,k]) + 1)#c(comp.probs[[1]][1,h,k], comp.probs[[2]][1,h,k]) + rnorm(2,0,0.1)
#tmp.prop.all = matrix(c(comp.probs[1,(i-1),h,], comp.probs[2,(i-1),h,]), ncol = P, byrow=TRUE) + rnorm(2*P,0,0.01)
#tmp.prop.all = apply(tmp.prop.all,2,FUN = function(vec){vec/sum(vec)})
if(min(tmp.prop.all) < 0 | max(tmp.prop.all) > 1){
r = -Inf
} else{
prop.probs = comp.probs[,(i-1):i,,]
# Fill still-NA iteration-i entries from iteration i-1 so the
# likelihood sees the current state (classes < h already updated at i).
prop.probs[1,2,,] = ifelse(is.na(prop.probs[1,2,,]), prop.probs[1,1,,], prop.probs[1,2,,])
prop.probs[2,,,] = 1-prop.probs[1,,,]
prop.tmp.probs = prop.probs
prop.tmp.probs[1,2,h,] = tmp.prop.all[1,]
prop.tmp.probs[2,,,] = 1 - prop.tmp.probs[1,,,]
mix.probs.hold = rbind(mix.probs[1,], mix.probs[i,])
if(h == 1){
bin.lik = likelihood.vec(M[i,],samp.size,G, sample = 2, counts.mat, mix.probs = mix.probs.hold, comp.probs =prop.probs, par = TRUE)
}
bin.lik.new = likelihood.vec(M[i,],samp.size,G, sample = 2, counts.mat, mix.probs = mix.probs.hold, comp.probs =prop.tmp.probs, par = TRUE)
prop.post = bin.lik.new + sum(log(ddirichlet(t(tmp.prop.all),alpha = rep(.5,2))))
past.post = bin.lik + sum(log(ddirichlet(cbind(comp.probs[1,i-1,h,], comp.probs[2,i-1,h,]),alpha = rep(.5,2))))
ppropos = 0
for(k in 1:P){
ppropos = ppropos + log(ddirichlet(c(comp.probs[1,(i-1),h,k], comp.probs[2,(i-1),h,k]),alpha = 500 *tmp.prop.all[,k] + 1)) - log(ddirichlet(tmp.prop.all[,k],alpha = 500 *c(comp.probs[1,(i-1),h,k], comp.probs[2,(i-1),h,k]) + 1))
}
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
comp.probs[1,i,h,] = tmp.prop.all[1,]
accept[h] = accept[h] + 1
bin.lik = bin.lik.new
}else{
comp.probs[1,i,h,] = comp.probs[1,i-1,h,]
}
comp.probs[2,,,] = 1 - comp.probs[1,,,]
}##End of for
###Calculate marginal probability for sample
marg.prob[i,] = parApply(cl = cl, counts.mat,MARGIN = 1,FUN = twoWay.probs.vectorized, mix.probs= mix.probs, class.probs = comp.probs, sample = i, G=G)
###Calculate full probability for sample
#for(z in 1:nrow(full.mat)){
# full.prob[i,z] = fullTab.probs(mix.probs, comp.probs, vals = full.mat[z,], sample = i)
#}
print(i)
}##End of for
return(list("marg_probs" = marg.prob, "full_probs" = full.prob, "M" = M, "Psi" = comp.probs, "Pi" = mix.probs, "accept_rate_pi" = accept.pi, "accept_rate_psi" = accept, "accept_rate_M" = accept.M))
}##End of function
print("Read in the functions...")
##Project layout relative to the working directory: data/ holds the survey
##extract, out/ receives saved chains, packages/ is declared but unused here.
base.dir = getwd()
data.dir = paste(base.dir,"/data",sep="")
results.dir = paste(base.dir,"/out",sep="")
packages.dir = paste(base.dir,"/packages",sep="")
##Stata file read with foreign::read.dta -- presumably the NLTCS extract
##(ICPSR study 9681, part 6); confirm against the data documentation.
df = read.dta(paste(data.dir, "da09681-0006.dta", sep="/"))
print("Reading in the data...")
##Keep the 16 survey items (SCN_15_A..I and SCN_17_A..G, wave Y04) and
##restrict to rows that answered "Yes"/"No" on every item.  The original
##repeated the subset statement once per column; a single loop over the
##selected columns is equivalent: `%in%` never returns NA, and filtering
##column-by-column yields the same intersection of rows.
df.samp = df[,c("SCN_15_A_Y04", "SCN_15_B_Y04", "SCN_15_C_Y04", "SCN_15_D_Y04", "SCN_15_E_Y04", "SCN_15_F_Y04", "SCN_15_G_Y04", "SCN_15_H_Y04", "SCN_15_I_Y04", "SCN_17_A_Y04","SCN_17_B_Y04", "SCN_17_C_Y04", "SCN_17_D_Y04", "SCN_17_E_Y04", "SCN_17_F_Y04", "SCN_17_G_Y04")]
for(col.name in names(df.samp)){
df.samp = df.samp[which(df.samp[[col.name]] %in% c("Yes", "No")),]
}
##Two way margins
##Construct the matrix
##Construct the margins -- 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
##Build every pairwise 2x2 margin of df.samp: `freq` collects the four
##observed cell counts per pair and comb.mat gets one row per cell (the two
##fixed columns filled, all other columns NA).  Only the pairs selected by
##PrivBayes (privbayes_pairs_e5_0.csv) are retained.
freq = c()
keep = c()
P = ncol(df.samp)
comb.mat = matrix(ncol = P)
pairs = fread("privbayes_pairs_e5_0.csv")
for(i in 1:(P-1)){
for(j in (i+1):P){
counts = plyr::count(df.samp[,c(i,j)])
freq = c(freq, counts$freq)
##Keep this pair iff some row of `pairs` contains both column names.
tmp.total = max(apply(pairs, MARGIN = 1, FUN = function(vec) sum(vec %in% names(counts)[1:2])))
tmp.keep = ifelse(tmp.total == 2, TRUE, FALSE)
keep = c(keep, rep(tmp.keep,4))
##Factor columns are coerced to their integer codes when assigned into the
##numeric matrix below.
tmp.mat = matrix(NA, nrow = 4, ncol = P)
tmp.mat[,i] = counts[,1]
tmp.mat[,j] = counts[,2]
comb.mat = rbind(comb.mat, tmp.mat)
}
}
##Drop the all-NA seed row, then restrict to the PrivBayes-selected pairs.
comb.mat = comb.mat[-1,]
comb.mat = comb.mat[keep,]
freq = freq[keep]
##Subtracting 5 presumably remaps the Stata factor codes of "No"/"Yes"
##(apparently 6/7 in this extract) to levels 1/2 -- TODO confirm against
##levels(df.samp[[1]]).
comb.mat = comb.mat -5
##Run the algorithm
##Privacy budget spent on the margin counts: 75% of eps = 0.5 (the remaining
##25% presumably went to the PrivBayes pair selection -- confirm).
eps = .5*.75
##`reps` appears unused in this script.
reps = 2
##Worker cluster for the parallelized likelihood; export the helpers the
##workers call (clusterExport's get() resolves data.table/rowprods from
##their packages since they are not in the global environment).
cl = makeCluster(detectCores()-1)
clusterExport(cl, varlist = c("twoWay.probs.vectorized", "row.prob.vec","getProbs", "data.table", "rowprods", "P"))
##Single replicate: zz doubles as the RNG seed and the output-file tag.
for(zz in 100){
set.seed(zz)
start = Sys.time()
samps = mcmc.sampler.v3(freq, comb.mat, P=P,nsamples = 4000, eps = eps, samp.size = nrow(df.samp), G=7, results.dir = results.dir)
end = Sys.time()
print(end-start)
save(samps, file = paste0(results.dir, "/", eps, "_iter",zz,"c.Rdata"))
}
stopCluster(cl)
| /paper/NLTCS/code/nltcs_e0_5.R | no_license | michellepistner/BayesLCM | R | false | false | 29,302 | r | library(data.table)
library(plyr)
library(MCMCpack)
library(foreign)
library(matrixStats)
library(Rfast)
library(parallel)
print("Loaded the libraries...")
##getProbs: vectorized lookup into the 4-d component-probability array.
##Each row of the index matrix built below is one
##(level, MCMC-iteration, latent-class, variable) tuple, so the result is a
##vector with one probability per input element.
getProbs = function(comb, cols, samp, G.used, class.probs){
idx = cbind(comb, samp, G.used, cols)
class.probs[idx]
}
##row.prob.vec: summed cell probability for a batch of complete cells.
##combs.full: (#cells x P) matrix/data.table, one level per variable per row.
##For every latent class g = 1..G the per-variable emission probabilities are
##looked up via getProbs and multiplied row-wise, then the class-specific
##products are weighted by the mixing probabilities of MCMC iteration
##`sample` and summed over both cells and classes.
row.prob.vec <- function(combs.full, mix.probs, class.probs, P, sample, G){
##Variable index for every entry of the row-flattened combs.full.
iter.vec = rep(1:P, nrow(combs.full))
samp = rep(sample, nrow(combs.full)*ncol(combs.full))
##Flatten row-by-row so entries line up with iter.vec.
combs = c(t(combs.full))
##(#cells * P) x G matrix: column g holds the class-g emission probs.
tmp.probs = sapply(1:G, FUN = getProbs, comb = combs, col = iter.vec, samp = samp, class.probs = class.probs)
##Reshape to (#cells * G) x P (all class-1 cells first, then class-2, ...)
##and take the product across variables; rowprods() is Rfast's row product.
tmp.probs = rowprods(matrix(tmp.probs, ncol = P, byrow = TRUE))
##Weight each class block by its mixing probability and sum everything.
prob = sum(tmp.probs * rep(mix.probs[sample,],each = length(tmp.probs)/G))
return(prob)
}
##row.prob: probability of a single complete cell `combs` (one level per
##variable) at MCMC iteration `sample`: sum over latent classes of the mixing
##weight times the product of per-variable emission probabilities.
##Scalar counterpart of row.prob.vec; rowProds() is matrixStats' row product.
##Fix: the sapply call now forwards class.probs (the original omitted it, so
##getProbs errored on a missing argument; row.prob.vec forwards it correctly).
row.prob <- function(combs, mix.probs, class.probs, P, sample, G){
##We need a fixed value for val1 and val2 and iterate over the combinations
iter.vec = seq_len(P)
samp = rep(sample, length(combs))
##One column per latent class from sapply; transpose so rows index classes.
tmp.probs = rowProds(t(sapply(1:G, FUN = getProbs, comb = combs, col = iter.vec, samp = samp, class.probs = class.probs)))
prob = sum(mix.probs[sample,]*tmp.probs)
return(prob)
}
##twoWay.probs.vectorized: model-implied probability of one two-way margin
##cell, P(X[index1] = val1, X[index2] = val2), at MCMC iteration `sample`,
##obtained by summing full-cell probabilities over every combination of the
##remaining P - 2 variables.
##The margin may be given either as a row of counts.mat (counts.vec, whose
##two non-NA entries identify the pair and its levels) or explicitly via
##index1/index2 and val1/val2.  class.probs is the 4-d array
##[level, iteration, class, variable]; G is the number of latent classes.
twoWay.probs.vectorized <- function(counts.vec, mix.probs, class.probs, index1=NULL, index2=NULL, val1=NULL, val2 = NULL, sample = 1, G){
if(is.null(index1)){
inds = which(!is.na(counts.vec))
index1 = inds[1]
index2 = inds[2]
val1 = counts.vec[inds[1]]
val2 = counts.vec[inds[2]]
}
##Find all possible combinations
P = dim(class.probs)[4]
###First, we need to find all combinations to sum over
##Variables not fixed by the margin; each takes K levels (K from dim 1).
p.opt = seq(1,P)[!(seq(1,P) %in% c(index1,index2))]
K = dim(class.probs)[1]
tmp.list = rep(list(seq(1,K)), length(p.opt))
combs = expand.grid(tmp.list)
names(combs) = p.opt
vec1 = rep(val1, nrow(combs))
vec2 = rep(val2, nrow(combs))
##Splice in these vectors at the correct place
##Assemble the full (#cells x P) design: the fixed pair goes into its own
##columns, the enumerated combinations fill the remaining columns in order.
combs.full = matrix(NA, nrow = nrow(combs), ncol = ncol(combs) + 2)
combs.full[,index1] = vec1
combs.full[,index2] = vec2
tmp.counter = 1
for(i in 1:ncol(combs.full)){
if(is.na(combs.full[1,i])){
combs.full[,i] = combs[,tmp.counter]
tmp.counter = tmp.counter + 1}
}
combs.full = data.table(combs.full)
##Sum the cell probabilities over the enumerated combinations.
prob = row.prob.vec(combs.full, mix.probs, class.probs, P, sample, G)
return(prob)
}#end of function
##fullTab.probs: probability of one full contingency-table cell under the
##latent class model.  `vals` gives the level (index into the class.probs
##list) of each of the P variables.  With `sample` set, the draw from that
##MCMC iteration is used; with sample = NULL, posterior means over all
##stored iterations are used instead.
##class.probs: list with one (nsamples x G x P) array per level.
fullTab.probs <- function(mix.probs, class.probs, vals, sample = NULL){
##Guard: need exactly one level per variable.
if(length(vals) != dim(class.probs[[1]])[3]){
return("Evaluated combination is of the wrong length!!")
}
n.class = ncol(mix.probs)
if(is.null(sample)){
##Posterior-mean path: average weights and emissions over iterations first.
weight.avg = apply(mix.probs, 2, mean)
cell.by.class = rep(1, n.class)
for(v in seq_along(vals)){
psi.bar = apply(class.probs[[vals[v]]], c(2,3), mean)
cell.by.class = cell.by.class * psi.bar[,v]
}
prob = sum(weight.avg * cell.by.class)
} else{
##Single-iteration path: use the stored draw directly.
cell.by.class = rep(1, n.class)
for(v in seq_along(vals)){
cell.by.class = cell.by.class * class.probs[[vals[v]]][sample,,v]
}
prob = sum(mix.probs[sample,] * cell.by.class)
}
return(prob)
}
##lap.lik: log-likelihood (up to a constant) of noisy counts `data` under a
##two-sided geometric / discrete-Laplace mechanism centered at the true
##counts `prop`, with parameter alpha = exp(E) (E < 0): every unit of
##absolute error contributes log(alpha) = E.
##Vectorized replacement for the original element-wise loop; also returns 0
##(instead of NA) for zero-length input, where 1:length(data) ran i = 1, 0.
lap.lik <- function(data,prop,E=-1/6){
alpha = exp(E)
sum(abs(data - prop) * log(alpha))
}#end of function
##dT2Lap: log-pmf of the two-sided geometric ("discrete Laplace") mechanism
##truncated to the observable range {0, ..., samp.size}.
##noisyS: observed noisy count(s); S: true count(s); E: log of the geometric
##parameter, so alpha = exp(E).  The boundary cells 0 and samp.size absorb
##the tail mass, hence their pmf terms lack the (1 - alpha) normalizer.
##Fix vs. original: iterate with seq_along() so zero-length input returns 0
##instead of hitting the 1:length(noisyS) == c(1, 0) trap.
dT2Lap <- function(noisyS,S,samp.size,E){
alpha = exp(E)
pmf = 0
for(j in seq_along(noisyS)){
##Per-element pmf term, chosen by whether the noisy count sits on a boundary.
if(noisyS[j] == 0){
term = alpha^abs(noisyS[j] - S[j])/(1+alpha)
} else if(noisyS[j] == samp.size){
term = alpha^(samp.size-S[j])/(1+alpha)
} else{
term = (1-alpha)*alpha^abs(noisyS[j] - S[j])/(1+alpha)
}
pmf = pmf + log(term)
}
return(pmf)
}
##likelihood: binomial log-likelihood of the (latent) margin counts given
##the model-implied two-way cell probabilities at MCMC iteration `sample`.
##counts: cell count(s); samp.size: number of records; counts.mat: margin
##definitions (two non-NA entries per row); index: evaluate a single row of
##counts.mat instead of all rows (counts must then be that single count).
##NOTE(review): this legacy path calls twoWay.probs(), which is not defined
##in this file (only twoWay.probs.vectorized is) -- confirm it is sourced
##elsewhere before using this function.
likelihood <- function(counts,samp.size, G, sample, counts.mat, mix.probs, comp.probs, index = NULL){
probs = rep(NA, length(counts))
if(is.null(index)){
##All margins: one model probability per row of counts.mat.
for(y in 1:nrow(counts.mat)){
inds = which(!is.na(counts.mat[y,]))
val1 = counts.mat[y, inds[1]]
val2 = counts.mat[y, inds[2]]
probs[y] = twoWay.probs(mix.probs, comp.probs, index1 = inds[1], index2 = inds[2], val1, val2, sample = sample)
}##End of for
} else{
##Single-margin shortcut used by the one-coordinate count updates.
inds = which(!is.na(counts.mat[index,]))
val1 = counts.mat[index, inds[1]]
val2 = counts.mat[index, inds[2]]
probs = twoWay.probs(mix.probs, comp.probs, index1 = inds[1], index2 = inds[2], val1, val2, sample = sample)
}
##Counts are modeled as Binomial(samp.size, cell probability).
lik = sum(dbinom(counts,samp.size, probs,log=T))
return(lik)
}#end of function
##likelihood.vec: binomial log-likelihood of the latent margin counts given
##the model-implied two-way probabilities at iteration `sample`, computed
##via twoWay.probs.vectorized.
##index: evaluate only that row of counts.mat (counts must be that single
##count); par = TRUE evaluates all rows in parallel on cluster `clust`.
##NOTE(review): clust defaults to the global `cl`, so a cluster must exist
##before calling with par = TRUE.
likelihood.vec <- function(counts,samp.size, G, sample, counts.mat, mix.probs, comp.probs, index = NULL, par = FALSE, clust = cl){
probs = rep(NA, length(counts))
if(is.null(index)){
if(par == TRUE){
##One model probability per margin row, computed across the workers.
probs = parApply(cl = clust, counts.mat,MARGIN = 1,FUN = twoWay.probs.vectorized, mix.probs= mix.probs, class.probs = comp.probs, sample = sample, G=G)
} else{
for(y in 1:nrow(counts.mat)){
inds = which(!is.na(counts.mat[y,]))
val1 = counts.mat[y, inds[1]]
val2 = counts.mat[y, inds[2]]
probs[y] = twoWay.probs.vectorized(counts.vec = NULL, mix.probs, comp.probs, index1 = inds[1], index2 = inds[2], val1, val2, sample = sample, G=G)
}##End of for
}
} else{
##Single-margin shortcut used by the one-coordinate count updates.
inds = which(!is.na(counts.mat[index,]))
val1 = counts.mat[index, inds[1]]
val2 = counts.mat[index, inds[2]]
probs = twoWay.probs.vectorized(counts.vec = NULL, mix.probs, comp.probs, index1 = inds[1], index2 = inds[2], val1, val2, sample = sample, G=G)
}
##Counts are modeled as Binomial(samp.size, cell probability).
lik = sum(dbinom(counts,samp.size, probs,log=T))
return(lik)
}#end of function
###Sampling function
###mcmc.sampler (v1): Metropolis-within-Gibbs sampler for a differentially
###private latent class model fit to noisy two-way-margin counts.
###Arguments:
###  counts     - true margin cell counts, one per row of counts.mat.
###  counts.mat - margin definitions: each row has exactly two non-NA
###               entries identifying the variable pair and its levels.
###  eps        - total privacy budget, split across margins via Q below.
###  P, G       - number of variables / latent classes.
###  nsamples   - chain length; samp.size - number of underlying records.
###  E.var      - proposal noise scale for the count updates (defaults to
###               the privacy scale E).
###Returns posterior draws of the counts (M), component probabilities
###(Psi, a 2-element list of nsamples x G x P arrays, one per level),
###mixing weights (Pi), per-iteration margin probabilities, and acceptance
###counters.  "full_probs" is allocated but never filled (the filling loop
###is commented out), so it comes back all NA.
###NOTE(review): Q reads the global comb.mat rather than the counts.mat
###argument, and unique(!is.na(...)) is logical (length <= 2), making the
###divisor 4 -- presumably the number of cells per pairwise margin was
###intended; confirm against the margin-construction code.
mcmc.sampler <- function(counts, counts.mat, eps = 1, P, G = 5, nsamples = 2000, samp.size, E.var = NULL){
###1.) Create noisy counts
Q = length(counts)/(length(unique(!is.na(c(comb.mat))))^2)
E = -(eps/Q)/2
if(is.null(E.var)){E.var = E}
##Two-sided geometric (discrete Laplace) noise with parameter alpha = exp(E).
errs = rgeom(length(counts),1-exp(E)) - rgeom(length(counts),1-exp(E))
noisyM = counts + errs
###Initialize the starting values
##Mixing probabilities
mix.probs = matrix(NA, nrow=nsamples, ncol = G)
mix.probs[1,] = rep(1/G,G) ##Starting value: equal weights
###Counts
##Latent true counts start at a re-noised copy, clamped to [0, samp.size].
M = matrix(NA, nrow = nsamples, ncol = length(noisyM))
M[1,] = noisyM + rgeom(length(counts),1-exp(E)) - rgeom(length(counts),1-exp(E))
M[1,] = ifelse(M[1,] < 0, 0, M[1,])
M[1,] = ifelse(M[1,] > samp.size, samp.size, M[1,])
###Psi
###Currently only implemented for two binary variables
comp.probs = list()
comp.probs[[1]] = array(NA, dim = c(nsamples, G, P))
##Random start
##Seed every class with the variable's noisy empirical margin proportion.
start.val = cbind(comb.mat, noisyM/samp.size)
tmp.mat = matrix(NA, nrow = G, ncol = P)
for(i in 1:ncol(tmp.mat)){
tmp.mat[,i] = mean(start.val[which(start.val[,i] == 1),ncol(tmp.mat) + 1])
}
comp.probs[[1]][1,,] = tmp.mat
comp.probs[[2]] = 1 - comp.probs[[1]]
##Marginal probabilities
marg.prob = matrix(NA, nsamples, length(noisyM))
##Full probabilities
full.prob = matrix(NA, nsamples, 2^P)
tmp.list = rep(list(seq(1,2)), P)
full.mat = expand.grid(tmp.list)
full.mat = as.matrix(full.mat)
accept = matrix(0,nrow=G, ncol=P)
accept.pi = 0
accept.M = rep(0, length(noisyM))
for(i in 2:nsamples){
##Sample for M
##Proposal
##(a) Update each latent count one at a time via an independence proposal
##centered at its noisy observation; dT2Lap supplies the proposal-density
##ratio for the MH correction.
for(q in 1:length(noisyM)){
M.prop = M[i,]
M.prop[q] = noisyM[q] + rgeom(1,1-exp(E.var)) - rgeom(1,1-exp(E.var))
if(q != length(noisyM)){M.prop[(q+1):length(noisyM)] = M[(i-1),(q+1):length(noisyM)]}
if(M.prop[q]< 0 | M.prop[q] > samp.size){
r = -Inf
}else{
prop.post = lap.lik(M.prop[q],noisyM[q], E=E) + likelihood(M.prop[q], samp.size, G, i-1, counts.mat, mix.probs, comp.probs, index = q)
past.post = lap.lik(M[(i-1),q], noisyM[q], E=E) + likelihood(M[(i-1),q],samp.size, G, i-1, counts.mat, mix.probs, comp.probs, index = q)
ppropos = dT2Lap(M[(i-1),q],M.prop[q],samp.size,E) - dT2Lap(M.prop[q],M[(i-1),q],samp.size,E)
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
M[i,q] = M.prop[q]
accept.M[q] = accept.M[q] + 1
}else{
M[i,q] = M[(i-1),q]
}
print(q)
}
##Sample for mixing probabilities
##(b) Random-walk proposal on the weights, renormalized onto the simplex.
prop.mix = mix.probs
prop.mix[(i-1),] = mix.probs[(i-1),] + rnorm(G,0,sd = 0.005)
prop.mix[(i-1),] = prop.mix[(i-1),]/sum(prop.mix[(i-1),])
if(min(prop.mix[(i-1),]) < 0){
r = -Inf
}else{
##NOTE(review): prop.post is evaluated at the CURRENT mix.probs and
##past.post at the PROPOSAL prop.mix, so the acceptance ratio appears
##inverted relative to the Dirichlet branch of mcmc.sampler.v3 (which
##evaluates prop.post at the proposal) -- confirm which is intended.
prop.post = likelihood(M[i,], samp.size, G, sample = i-1, counts.mat, mix.probs, comp.probs) + log(ddirichlet(mix.probs[(i-1),], alpha = rep(1/G,G)))
past.post = likelihood(M[i,], samp.size, G, sample = i-1, counts.mat, prop.mix, comp.probs) + log(ddirichlet(prop.mix[(i-1),], alpha = rep(1/G,G)))
##ppropos = log(ddirichlet(mix.probs[(i-1),], alpha = prop.mix[(i-1),])) - log(ddirichlet(prop.mix[(i-1),], alpha = mix.probs[(i-1),]))
ppropos = 0
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
mix.probs[i,] = prop.mix[(i-1),]
accept.pi = accept.pi + 1
}else{
mix.probs[i,] = mix.probs[(i-1),]
}
##For each latent class and variable, sample the component probs
##(c) One-at-a-time update of psi[h, k]: a normalized random-walk proposal
##on the 2-vector (psi, 1 - psi).  A 2-slice window over iterations
##(i-1, i) is copied so the likelihood can be evaluated at sample = 2.
for(k in 1:P){
for(h in 1:G){
##tmp.prop.all = rdirichlet(1, alpha = 500*c(comp.probs[[1]][1,h,k], comp.probs[[2]][1,h,k]) + 1)#c(comp.probs[[1]][1,h,k], comp.probs[[2]][1,h,k]) + rnorm(2,0,0.1)
tmp.prop.all = c(comp.probs[[1]][(i-1),h,k], comp.probs[[2]][(i-1),h,k]) + rnorm(2,0,0.01)
tmp.prop.all = tmp.prop.all / sum(tmp.prop.all)
if(min(tmp.prop.all) < 0 | max(tmp.prop.all) > 1){
r = -Inf
} else{
tmp.prop = tmp.prop.all[1]
prop.probs = list()
prop.probs[[1]] = comp.probs[[1]][(i-1):i,,]
##Row 2 (iteration i) may still be NA where not yet updated this sweep;
##fall back to row 1 so the likelihood sees a complete state.
prop.probs[[1]][2,,] = ifelse(is.na(prop.probs[[1]][2,,]), prop.probs[[1]][1,,], prop.probs[[1]][2,,])
prop.probs[[2]] = 1-prop.probs[[1]]
prop.tmp.probs = prop.probs
prop.tmp.probs[[1]][2,h,k] = tmp.prop
prop.tmp.probs[[2]] = 1 - prop.tmp.probs[[1]]
mix.probs.hold = rbind(mix.probs[1,], mix.probs[i,])
prop.post = likelihood(M[i,],samp.size,G, sample = 2, counts.mat, mix.probs.hold, prop.tmp.probs) + log(ddirichlet(c(tmp.prop, 1-tmp.prop),alpha = rep(.5,2)))
past.post = likelihood(M[i,],samp.size,G, sample = 2, counts.mat, mix.probs.hold, prop.probs) + log(ddirichlet(c(comp.probs[[1]][i-1,h,k], comp.probs[[2]][i-1,h,k]),alpha = rep(.5,2)))
##ppropos = log(ddirichlet(c(comp.probs[[1]][i-1,h,k], comp.probs[[2]][i-1,h,k]),alpha = tmp.prop.all)) - log(ddirichlet(tmp.prop.all,alpha = c(comp.probs[[1]][i-1,h,k], comp.probs[[2]][i-1,h,k])))
ppropos = 0
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
comp.probs[[1]][i,h,k] = tmp.prop
accept[h,k] = accept[h,k] + 1
}else{
comp.probs[[1]][i,h,k] = comp.probs[[1]][i-1,h,k]
}
comp.probs[[2]] = 1 - comp.probs[[1]]
}##End of for
}##End of for
###Calculate marginal probability for sample
##Store model-implied two-way margin probabilities for this iteration.
##NOTE(review): uses the scalar helper twoWay.probs(), which is not defined
##in this file -- confirm it is sourced elsewhere.
for(y in 1:nrow(counts.mat)){
inds = which(!is.na(counts.mat[y,]))
val1 = counts.mat[y, inds[1]]
val2 = counts.mat[y, inds[2]]
marg.prob[i,y] = twoWay.probs(mix.probs, comp.probs, index1 = inds[1], index2 = inds[2], val1, val2, sample = i)
}##End of for
###Calculate full probability for sample
#for(z in 1:nrow(full.mat)){
# full.prob[i,z] = fullTab.probs(mix.probs, comp.probs, vals = full.mat[z,], sample = i)
#}
print(i)
}##End of for
return(list("marg_probs" = marg.prob, "full_probs" = full.prob, "M" = M, "Psi" = comp.probs, "Pi" = mix.probs, "accept_rate_pi" = accept.pi, "accept_rate_psi" = accept, "accept_rate_M" = accept.M))
}##End of function
###Sampling function
###mcmc.sampler.v2: same model and update scheme as mcmc.sampler, with three
###changes: Psi is stored as a single 4-d array [level, iteration, class,
###variable] instead of a list; the likelihood goes through likelihood.vec
###(optionally parallel on the global cluster `cl`); and posterior terms are
###cached in past.post across updates to avoid recomputation.
###Interface and return value mirror mcmc.sampler (minus "full_probs").
###NOTE(review): as in v1, Q reads the global comb.mat rather than the
###counts.mat argument -- confirm.
mcmc.sampler.v2 <- function(counts, counts.mat, eps = 1, P, G = 5, nsamples = 2000, samp.size, E.var = NULL){
###1.) Create noisy counts
Q = length(counts)/(length(unique(!is.na(c(comb.mat))))^2)
E = -(eps/Q)/2
if(is.null(E.var)){E.var = E}
##Two-sided geometric (discrete Laplace) noise with parameter alpha = exp(E).
errs = rgeom(length(counts),1-exp(E)) - rgeom(length(counts),1-exp(E))
noisyM = counts + errs
###Initialize the starting values
##Mixing probabilities
mix.probs = matrix(NA, nrow=nsamples, ncol = G)
mix.probs[1,] = rep(1/G,G) ##Starting value: equal weights
###Counts
##Latent true counts start at a re-noised copy, clamped to [0, samp.size].
M = matrix(NA, nrow = nsamples, ncol = length(noisyM))
M[1,] = noisyM + rgeom(length(counts),1-exp(E)) - rgeom(length(counts),1-exp(E))
M[1,] = ifelse(M[1,] < 0, 0, M[1,])
M[1,] = ifelse(M[1,] > samp.size, samp.size, M[1,])
###Psi
###Currently only implemented for two binary variables
comp.probs = array(NA, dim = c(2,nsamples,G,P))
##Random start
##Seed every class with the variable's noisy empirical margin proportion.
start.val = cbind(comb.mat, noisyM/samp.size)
tmp.mat = matrix(NA, nrow = G, ncol = P)
for(i in 1:ncol(tmp.mat)){
tmp.mat[,i] = mean(start.val[which(start.val[,i] == 1),ncol(tmp.mat) + 1])
}
comp.probs[1,1,,] = tmp.mat
comp.probs[2,,,] = 1 - comp.probs[1,,,]
##Marginal probabilities
marg.prob = matrix(NA, nsamples, length(noisyM))
##Full probabilities
full.prob = matrix(NA, nsamples, 2^P)
tmp.list = rep(list(seq(1,2)), P)
full.mat = expand.grid(tmp.list)
full.mat = as.matrix(full.mat)
accept = matrix(0,nrow=G, ncol=P)
accept.pi = 0
accept.M = rep(0, length(noisyM))
for(i in 2:nsamples){
##Sample for M
##Proposal
##(a) One-coordinate independence proposals centered at the noisy counts.
##NOTE(review): past.post is computed only at q == 1 and then carried
##across coordinates (set to prop.post on acceptance), so for q > 1 the
##MH ratio compares terms from different margin cells; v3 recomputes both
##sides for every q -- confirm whether this caching is intended.
for(q in 1:length(noisyM)){
M.prop = M[i,]
M.prop[q] = noisyM[q] + rgeom(1,1-exp(E.var)) - rgeom(1,1-exp(E.var))
if(q != length(noisyM)){M.prop[(q+1):length(noisyM)] = M[(i-1),(q+1):length(noisyM)]}
if(M.prop[q]< 0 | M.prop[q] > samp.size){
r = -Inf
}else{
prop.post = lap.lik(M.prop[q],noisyM[q], E=E) + likelihood.vec(M.prop[q], samp.size, G, i-1, counts.mat, mix.probs, comp.probs, index = q)
if(q == 1){
past.post = lap.lik(M[(i-1),q], noisyM[q], E=E) + likelihood.vec(M[(i-1),q],samp.size, G, i-1, counts.mat, mix.probs, comp.probs, index = q)
}
ppropos = dT2Lap(M[(i-1),q],M.prop[q],samp.size,E) - dT2Lap(M.prop[q],M[(i-1),q],samp.size,E)
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
M[i,q] = M.prop[q]
accept.M[q] = accept.M[q] + 1
past.post = prop.post
}else{
M[i,q] = M[(i-1),q]
past.post = past.post
}
print(q)
}
##Sample for mixing probabilities
##(b) Random-walk proposal on the weights, renormalized onto the simplex.
prop.mix = mix.probs
prop.mix[(i-1),] = mix.probs[(i-1),] + rnorm(G,0,sd = 0.005)
prop.mix[(i-1),] = prop.mix[(i-1),]/sum(prop.mix[(i-1),])
if(min(prop.mix[(i-1),]) < 0){
r = -Inf
}else{
##NOTE(review): prop.post is evaluated at the CURRENT mix.probs and
##past.post at the PROPOSAL prop.mix -- apparently swapped relative to
##the Dirichlet branch of mcmc.sampler.v3; confirm which is intended.
prop.post = likelihood.vec(M[i,], samp.size, G, sample = i-1, counts.mat, mix.probs, comp.probs, par = TRUE) + log(ddirichlet(mix.probs[(i-1),], alpha = rep(1/G,G)))
past.post = likelihood.vec(M[i,], samp.size, G, sample = i-1, counts.mat, prop.mix, comp.probs) + log(ddirichlet(prop.mix[(i-1),], alpha = rep(1/G,G)))
##ppropos = log(ddirichlet(mix.probs[(i-1),], alpha = prop.mix[(i-1),])) - log(ddirichlet(prop.mix[(i-1),], alpha = mix.probs[(i-1),]))
ppropos = 0
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
mix.probs[i,] = prop.mix[(i-1),]
accept.pi = accept.pi + 1
past.post = prop.post
}else{
mix.probs[i,] = mix.probs[(i-1),]
past.post = past.post
}
##For each latent class and variable, sample the component probs
##(c) One-at-a-time update of psi[h, k] with a normalized random-walk
##proposal; the current-state posterior is reused from the cached past.post
##(the explicit recomputation is commented out below).
for(k in 1:P){
for(h in 1:G){
##tmp.prop.all = rdirichlet(1, alpha = 500*c(comp.probs[[1]][1,h,k], comp.probs[[2]][1,h,k]) + 1)#c(comp.probs[[1]][1,h,k], comp.probs[[2]][1,h,k]) + rnorm(2,0,0.1)
tmp.prop.all = c(comp.probs[1,(i-1),h,k], comp.probs[2,(i-1),h,k]) + rnorm(2,0,0.01)
tmp.prop.all = tmp.prop.all / sum(tmp.prop.all)
if(min(tmp.prop.all) < 0 | max(tmp.prop.all) > 1){
r = -Inf
} else{
tmp.prop = tmp.prop.all[1]
prop.probs = comp.probs[,(i-1):i,,]
##Row 2 (iteration i) may still be NA where not yet updated this sweep;
##fall back to row 1 so the likelihood sees a complete state.
prop.probs[1,2,,] = ifelse(is.na(prop.probs[1,2,,]), prop.probs[1,1,,], prop.probs[1,2,,])
prop.probs[2,,,] = 1-prop.probs[1,,,]
prop.tmp.probs = prop.probs
prop.tmp.probs[1,2,h,k] = tmp.prop
prop.tmp.probs[2,,,] = 1 - prop.tmp.probs[1,,,]
mix.probs.hold = rbind(mix.probs[1,], mix.probs[i,])
##Leftover timing instrumentation (start/end are unused).
start = Sys.time()
prop.post = likelihood.vec(M[i,],samp.size,G, sample = 2, counts.mat, mix.probs = mix.probs.hold, comp.probs =prop.tmp.probs, par = TRUE) + log(ddirichlet(c(tmp.prop, 1-tmp.prop),alpha = rep(.5,2)))
end = Sys.time()
##past.post = likelihood.vec(M[i,],samp.size,G, sample = 2, counts.mat, mix.probs = mix.probs.hold, comp.probs = prop.probs) + log(ddirichlet(c(comp.probs[1,i-1,h,k], comp.probs[2,i-1,h,k]),alpha = rep(.5,2)))
##ppropos = log(ddirichlet(c(comp.probs[[1]][i-1,h,k], comp.probs[[2]][i-1,h,k]),alpha = tmp.prop.all)) - log(ddirichlet(tmp.prop.all,alpha = c(comp.probs[[1]][i-1,h,k], comp.probs[[2]][i-1,h,k])))
ppropos = 0
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
comp.probs[1,i,h,k] = tmp.prop
accept[h,k] = accept[h,k] + 1
past.post = prop.post
}else{
comp.probs[1,i,h,k] = comp.probs[1,i-1,h,k]
past.post = past.post
}
comp.probs[2,,,] = 1 - comp.probs[1,,,]
}##End of for
}##End of for
###Calculate marginal probability for sample
##Store model-implied two-way margin probabilities for this iteration
##(parallel over the margin rows, on the global cluster `cl`).
marg.prob[i,] = parApply(cl = cl, counts.mat,MARGIN = 1,FUN = twoWay.probs.vectorized, mix.probs= mix.probs, class.probs = comp.probs, sample = i, G=G)
###Calculate full probability for sample
#for(z in 1:nrow(full.mat)){
# full.prob[i,z] = fullTab.probs(mix.probs, comp.probs, vals = full.mat[z,], sample = i)
#}
print(i)
}##End of for
return(list("marg_probs" = marg.prob, "M" = M, "Psi" = comp.probs, "Pi" = mix.probs, "accept_rate_pi" = accept.pi, "accept_rate_psi" = accept, "accept_rate_M" = accept.M))
}##End of function
###mcmc.sampler.v3: latest sampler variant.  Differences from v2:
### - Warm start: load() pulls a previous run (`samps`) from results.dir and
###   seeds Pi and Psi with its final (4000th) draw.
### - The count updates recompute both sides of the MH ratio at every
###   coordinate (dropping v2's cross-coordinate past.post caching).
### - Mixing weights: .methodPi = "dirichlet" uses a concentrated Dirichlet
###   proposal with the proper proposal-density correction; the default
###   "normal" branch is a renormalized random walk with a gamma prior.
### - Psi is updated in blocks: for each class h, all P column proposals are
###   drawn at once from concentrated Dirichlets and accepted/rejected
###   jointly, with the current-state likelihood (bin.lik) cached per class.
###NOTE(review): as in v1/v2, Q reads the global comb.mat rather than the
###counts.mat argument -- confirm.
mcmc.sampler.v3 <- function(counts, counts.mat, eps = 1, P, G = 5, nsamples = 2000, samp.size, E.var = NULL, results.dir, .methodPi = "normal"){
###1.) Create noisy counts
Q = length(counts)/(length(unique(!is.na(c(comb.mat))))^2)
E = -(eps/Q)/2
if(is.null(E.var)){E.var = E}
##Two-sided geometric (discrete Laplace) noise with parameter alpha = exp(E).
errs = rgeom(length(counts),1-exp(E)) - rgeom(length(counts),1-exp(E))
noisyM = counts + errs
###Initialize the starting values
##Mixing probabilities
##Warm start: load() brings `samps` (a previously saved run) into this
##function's environment; its last draw seeds Pi (here) and Psi (below).
load(paste0(results.dir, "/0.5_iter100b.Rdata"))
mix.probs = matrix(NA, nrow=nsamples, ncol = G)
mix.probs[1,] = samps$Pi[4000,] ##rdirichlet(1,rep(1/G,G)) ##Starting value: equal weights
###Counts
##Latent true counts start at a re-noised copy, clamped to [0, samp.size].
M = matrix(NA, nrow = nsamples, ncol = length(noisyM))
M[1,] = noisyM + rgeom(length(counts),1-exp(E)) - rgeom(length(counts),1-exp(E))
M[1,] = ifelse(M[1,] < 0, 0, M[1,])
M[1,] = ifelse(M[1,] > samp.size, samp.size, M[1,])
###Psi
###Currently only implemented for two binary variables
comp.probs = array(NA, dim = c(2,nsamples,G,P))
##Random start
#start.val = cbind(comb.mat, noisyM/samp.size, noisyM)
#tmp.mat = matrix(NA, nrow = G, ncol = P)
#for(i in 1:ncol(tmp.mat)){
# nrow.starts = nrow(start.val[which(start.val[,i] == 1),])
# tmp.mat[,i] = sum(start.val[which(start.val[,i] == 1),ncol(tmp.mat) + 2])/(samp.size*nrow.starts/2)
#
#}
comp.probs[1,1,,] = samps$Psi[1,4000,,]
comp.probs[2,,,] = 1 - comp.probs[1,,,]
##Marginal probabilities
marg.prob = matrix(NA, nsamples, length(noisyM))
##Full probabilities
##full.prob is allocated but never filled (the loop below is commented
##out), so the returned "full_probs" is all NA.
full.prob = matrix(NA, nsamples, 2^P)
tmp.list = rep(list(seq(1,2)), P)
full.mat = expand.grid(tmp.list)
full.mat = as.matrix(full.mat)
accept = rep(0,G)
accept.pi = 0
accept.M = rep(0, length(noisyM))
for(i in 2:nsamples){
##Sample for M
##Proposal
##(a) One-coordinate independence proposals centered at the noisy counts;
##both sides of the MH ratio are recomputed for every coordinate.
for(q in 1:length(noisyM)){
M.prop = M[(i-1),]
M.prop[q] = noisyM[q] + rgeom(1,1-exp(E.var)) - rgeom(1,1-exp(E.var))
if(q != length(noisyM)){M.prop[(q+1):length(noisyM)] = M[(i-1),(q+1):length(noisyM)]}
if(M.prop[q]< 0 | M.prop[q] > samp.size){
r = -Inf
}else{
bin.lik.new = likelihood.vec(M.prop[q], samp.size, G, i-1, counts.mat, mix.probs, comp.probs, index = q)
prop.post = lap.lik(M.prop[q],noisyM[q], E=E) + bin.lik.new
bin.lik= likelihood.vec(M[(i-1),q],samp.size, G, i-1, counts.mat, mix.probs, comp.probs, index = q)
past.post = lap.lik(M[(i-1),q], noisyM[q], E=E) + bin.lik
ppropos = dT2Lap(M[(i-1),q],M.prop[q],samp.size,E) - dT2Lap(M.prop[q],M[(i-1),q],samp.size,E)
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
M[i,q] = M.prop[q]
accept.M[q] = accept.M[q] + 1
} else{
M[i,q] = M[(i-1),q]
}
}
##Sample for mixing probabilities
if(.methodPi == "dirichlet"){
##(b1) Concentrated Dirichlet proposal centered at the current weights,
##with the matching proposal-density correction in ppropos.
prop.mix = mix.probs
prop.mix[(i-1),] = rdirichlet(1, alpha = 50 * mix.probs[(i-1),] + .5) ###50 * mix.probs[(i-1),] + 1
if(min(prop.mix[(i-1),]) < 0){
r = -Inf
}else{
prop.post = likelihood.vec(M[i,], samp.size, G, sample = i-1, counts.mat, prop.mix, comp.probs, par = TRUE) + log(ddirichlet(prop.mix[(i-1),], alpha = rep(1/G,G)))
past.post = likelihood.vec(M[i,], samp.size, G, sample = i-1, counts.mat, mix.probs, comp.probs, par = TRUE) + log(ddirichlet(mix.probs[(i-1),], alpha = rep(1/G,G)))
ppropos = log(ddirichlet(mix.probs[(i-1),], alpha = 50 * prop.mix[(i-1),] + .5)) - log(ddirichlet(prop.mix[(i-1),], alpha = 50 * mix.probs[(i-1),] +.5))
##ppropos = log(ddirichlet(mix.probs[(i-1),], alpha = rep(1/G, G))) - log(ddirichlet(prop.mix[(i-1),], alpha = rep(1/G,G)))
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
mix.probs[i,] = prop.mix[(i-1),]
accept.pi = accept.pi + 1
}else{
mix.probs[i,] = mix.probs[(i-1),]
}
} else{
##(b2) Default "normal" branch: renormalized random walk with a
##Gamma(1/G, 1) prior on the weights.
prop.mix = mix.probs
eta = mix.probs[(i-1),] + rnorm(G,0,sd = 0.01)
prop.mix[(i-1),] = eta
prop.mix[(i-1),] = prop.mix[(i-1),]/sum(prop.mix[(i-1),])
if(min(prop.mix[(i-1),]) < 0){
r = -Inf
}else{
##NOTE(review): prop.post is evaluated at the CURRENT mix.probs and
##past.post at the PROPOSAL prop.mix -- apparently swapped relative to
##the Dirichlet branch above; confirm which is intended.  Also note
##`cl=cl` only works via partial matching to likelihood.vec's `clust`.
prop.post = likelihood.vec(M[i,], samp.size, G, sample = i-1, counts.mat, mix.probs, comp.probs, cl=cl) + sum(dgamma(mix.probs[(i-1),], shape = 1/G, scale = 1, log = TRUE))
past.post = likelihood.vec(M[i,], samp.size, G, sample = i-1, counts.mat, prop.mix, comp.probs, cl=cl) + sum(dgamma(prop.mix[(i-1),], shape = 1/G, scale = 1, log = TRUE))
ppropos = 0
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
mix.probs[i,] = prop.mix[(i-1),]
accept.pi = accept.pi + 1
}else{
mix.probs[i,] = mix.probs[(i-1),]
}
}
##For each latent class and variable, sample the component probs
##(c) Blocked Psi update: for class h, propose all P columns at once from
##concentrated Dirichlets and accept or reject them jointly.
for(h in 1:G){
tmp.prop.all = matrix(NA, nrow = 2, ncol = P)
for(k in 1:P){
tmp.prop.all[,k] = rdirichlet(1, alpha = 500 * c(comp.probs[1,(i-1),h,k], comp.probs[2,(i-1),h,k])+ 1)
}
##tmp.prop.all = rdirichlet(1, alpha = 5*c(comp.probs[[1]][1,h,k], comp.probs[[2]][1,h,k]) + 1)#c(comp.probs[[1]][1,h,k], comp.probs[[2]][1,h,k]) + rnorm(2,0,0.1)
#tmp.prop.all = matrix(c(comp.probs[1,(i-1),h,], comp.probs[2,(i-1),h,]), ncol = P, byrow=TRUE) + rnorm(2*P,0,0.01)
#tmp.prop.all = apply(tmp.prop.all,2,FUN = function(vec){vec/sum(vec)})
if(min(tmp.prop.all) < 0 | max(tmp.prop.all) > 1){
r = -Inf
} else{
prop.probs = comp.probs[,(i-1):i,,]
##Row 2 (iteration i) may still be NA where not yet updated this sweep;
##fall back to row 1 so the likelihood sees a complete state.
prop.probs[1,2,,] = ifelse(is.na(prop.probs[1,2,,]), prop.probs[1,1,,], prop.probs[1,2,,])
prop.probs[2,,,] = 1-prop.probs[1,,,]
prop.tmp.probs = prop.probs
prop.tmp.probs[1,2,h,] = tmp.prop.all[1,]
prop.tmp.probs[2,,,] = 1 - prop.tmp.probs[1,,,]
mix.probs.hold = rbind(mix.probs[1,], mix.probs[i,])
##bin.lik caches the current-state likelihood: computed once per
##iteration (h == 1) and refreshed below only when a proposal is
##accepted, since a rejection leaves the current state unchanged.
if(h == 1){
bin.lik = likelihood.vec(M[i,],samp.size,G, sample = 2, counts.mat, mix.probs = mix.probs.hold, comp.probs =prop.probs, par = TRUE)
}
bin.lik.new = likelihood.vec(M[i,],samp.size,G, sample = 2, counts.mat, mix.probs = mix.probs.hold, comp.probs =prop.tmp.probs, par = TRUE)
prop.post = bin.lik.new + sum(log(ddirichlet(t(tmp.prop.all),alpha = rep(.5,2))))
past.post = bin.lik + sum(log(ddirichlet(cbind(comp.probs[1,i-1,h,], comp.probs[2,i-1,h,]),alpha = rep(.5,2))))
##Asymmetric-proposal correction, accumulated per variable.
ppropos = 0
for(k in 1:P){
ppropos = ppropos + log(ddirichlet(c(comp.probs[1,(i-1),h,k], comp.probs[2,(i-1),h,k]),alpha = 500 *tmp.prop.all[,k] + 1)) - log(ddirichlet(tmp.prop.all[,k],alpha = 500 *c(comp.probs[1,(i-1),h,k], comp.probs[2,(i-1),h,k]) + 1))
}
r = exp(prop.post - past.post + ppropos)
}
if(runif(1)<r){
comp.probs[1,i,h,] = tmp.prop.all[1,]
accept[h] = accept[h] + 1
bin.lik = bin.lik.new
}else{
comp.probs[1,i,h,] = comp.probs[1,i-1,h,]
}
comp.probs[2,,,] = 1 - comp.probs[1,,,]
}##End of for
###Calculate marginal probability for sample
##Store model-implied two-way margin probabilities for this iteration
##(parallel over the margin rows, on the global cluster `cl`).
marg.prob[i,] = parApply(cl = cl, counts.mat,MARGIN = 1,FUN = twoWay.probs.vectorized, mix.probs= mix.probs, class.probs = comp.probs, sample = i, G=G)
###Calculate full probability for sample
#for(z in 1:nrow(full.mat)){
# full.prob[i,z] = fullTab.probs(mix.probs, comp.probs, vals = full.mat[z,], sample = i)
#}
print(i)
}##End of for
return(list("marg_probs" = marg.prob, "full_probs" = full.prob, "M" = M, "Psi" = comp.probs, "Pi" = mix.probs, "accept_rate_pi" = accept.pi, "accept_rate_psi" = accept, "accept_rate_M" = accept.M))
}##End of function
## Script driver: load the survey data, build the two-way margin counts, and
## run the MCMC sampler defined above, saving one result file per seed.
print("Read in the functions...")
# directories (relative to the current working directory)
base.dir = getwd()
data.dir = paste0(base.dir, "/data")
results.dir = paste0(base.dir, "/out")
packages.dir = paste0(base.dir, "/packages")
df = read.dta(paste(data.dir, "da09681-0006.dta", sep="/"))
print("Reading in the data...")
# the 16 survey items used in the analysis
scn.cols = c("SCN_15_A_Y04", "SCN_15_B_Y04", "SCN_15_C_Y04", "SCN_15_D_Y04",
             "SCN_15_E_Y04", "SCN_15_F_Y04", "SCN_15_G_Y04", "SCN_15_H_Y04",
             "SCN_15_I_Y04", "SCN_17_A_Y04", "SCN_17_B_Y04", "SCN_17_C_Y04",
             "SCN_17_D_Y04", "SCN_17_E_Y04", "SCN_17_F_Y04", "SCN_17_G_Y04")
df.samp = df[, scn.cols]
# keep only respondents with an explicit Yes/No answer on every item
# (replaces 16 copy-pasted filter statements; %in% never returns NA, so the
# logical subset is equivalent to the original which(... %in% ...) filters)
for (col in scn.cols) {
  df.samp = df.samp[df.samp[[col]] %in% c("Yes", "No"), ]
}
## Two way margins
## Construct the matrix: every unordered pair of items contributes the 4 rows
## of its 2x2 contingency table, with the remaining columns left NA.
freq = c()
keep = c()
P = ncol(df.samp)
comb.mat = matrix(ncol = P)
# NOTE(review): read from the working directory, unlike the other inputs which
# live under data.dir -- confirm this path is intended
pairs = fread("privbayes_pairs_e5_0.csv")
for (i in 1:(P-1)) {
  for (j in (i+1):P) {
    counts = plyr::count(df.samp[, c(i, j)])
    freq = c(freq, counts$freq)
    # keep this pair only if some row of `pairs` names both of its variables
    tmp.total = max(apply(pairs, MARGIN = 1, FUN = function(vec) sum(vec %in% names(counts)[1:2])))
    tmp.keep = tmp.total == 2
    keep = c(keep, rep(tmp.keep, 4))
    tmp.mat = matrix(NA, nrow = 4, ncol = P)
    tmp.mat[, i] = counts[, 1]
    tmp.mat[, j] = counts[, 2]
    comb.mat = rbind(comb.mat, tmp.mat)
  }
}
comb.mat = comb.mat[-1, ]   # drop the all-NA initialisation row
comb.mat = comb.mat[keep, ]
freq = freq[keep]
# shift the stored factor codes down by 5 -- presumably maps the Stata value
# labels for Yes/No onto small integers; TODO confirm against the .dta labels
comb.mat = comb.mat - 5
## Run the algorithm
eps = .5 * .75   # privacy budget allocated to the margins
reps = 2
cl = makeCluster(detectCores() - 1)
clusterExport(cl, varlist = c("twoWay.probs.vectorized", "row.prob.vec", "getProbs", "data.table", "rowprods", "P"))
# a single replicate with seed 100; extend the vector to run more seeds
for (zz in 100) {
  set.seed(zz)
  start = Sys.time()
  samps = mcmc.sampler.v3(freq, comb.mat, P=P, nsamples = 4000, eps = eps, samp.size = nrow(df.samp), G=7, results.dir = results.dir)
  end = Sys.time()
  print(end - start)
  save(samps, file = paste0(results.dir, "/", eps, "_iter", zz, "c.Rdata"))
}
stopCluster(cl)
|
# Demo: how factors differ from plain numeric and character vectors.
var1 <- c(1,2,3,1,2)            # numeric vector
var2 <- factor(c(1,2,3,1,2))    # factor with levels "1", "2", "3"
var1
var2
var1 + 2                        # element-wise arithmetic works on numerics
var2 + 2                        # NA with a warning: '+' is not meaningful for factors
class(var1)                     # "numeric"
class(var2)                     # "factor"
levels(var1)                    # NULL: plain vectors carry no levels
levels(var2)                    # "1" "2" "3"
var3 <- c("a", "b", "b", "c")          # character vector
var4 <- factor(c("a", "b", "b", "c"))  # factor with levels "a", "b", "c"
var3
var4
class(var3)                     # "character"
class(var4)                     # "factor"
mean(var1)                      # 1.8
mean(var2)                      # NA with a warning: mean() needs numeric input
var2 <- as.numeric(var2)        # converts to the integer level codes (here 1,2,3,1,2)
mean(var2)                      # now works: 1.8
class(var2)                     # "numeric"
levels(var2)                    # NULL again after the conversion
| /Further_/script_15_2.R | no_license | Dodant/Data-Science-with-R | R | false | false | 323 | r | var1 <- c(1,2,3,1,2)
# Demo: how factors differ from plain numeric and character vectors.
# (var1 is defined just above this fragment.)
var2 <- factor(c(1,2,3,1,2))    # factor with levels "1", "2", "3"
var1
var2
var1 + 2                        # element-wise arithmetic works on numerics
var2 + 2                        # NA with a warning: '+' is not meaningful for factors
class(var1)                     # "numeric"
class(var2)                     # "factor"
levels(var1)                    # NULL: plain vectors carry no levels
levels(var2)                    # "1" "2" "3"
var3 <- c("a", "b", "b", "c")          # character vector
var4 <- factor(c("a", "b", "b", "c"))  # factor with levels "a", "b", "c"
var3
var4
class(var3)                     # "character"
class(var4)                     # "factor"
mean(var1)                      # 1.8
mean(var2)                      # NA with a warning: mean() needs numeric input
var2 <- as.numeric(var2)        # converts to the integer level codes (here 1,2,3,1,2)
mean(var2)                      # now works: 1.8
class(var2)                     # "numeric"
levels(var2)                    # NULL again after the conversion
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ex25.25}
\alias{ex25.25}
\title{Data from Exercise 25.25}
\format{\Sexpr[results=rd]{bps5data:::doc_data("ex25.25") }}
\source{
\url{ http://bcs.whfreeman.com/bps5e/content/cat_030/PC-Text.zip }
}
\usage{
data("ex25.25")
}
\description{
Data from Exercise 25.25 of \emph{The Basic Practice of Statistics}, 5th edition.
}
\references{
Moore, David S. 2009. \emph{The Basic Practice of Statistics}. 5th edition. New York: W. H. Freeman.
}
\seealso{
Other datasets from Chapter 25 of \emph{BPS} 5th ed.: \code{\link{eg25.01}};
\code{\link{eg25.05}}; \code{\link{eg25.06}};
\code{\link{eg25.09}}; \code{\link{eg25.10}};
\code{\link{ex25.01}}; \code{\link{ex25.02}};
\code{\link{ex25.05}}; \code{\link{ex25.06}};
\code{\link{ex25.07}}; \code{\link{ex25.08}};
\code{\link{ex25.11}}; \code{\link{ex25.12}};
\code{\link{ex25.13}}; \code{\link{ex25.14}};
\code{\link{ex25.15}}; \code{\link{ex25.16}};
\code{\link{ex25.18}}; \code{\link{ex25.19}};
\code{\link{ex25.21}}; \code{\link{ex25.22}};
\code{\link{ex25.23}}; \code{\link{ex25.24}};
\code{\link{ex25.26}}; \code{\link{ex25.27}};
\code{\link{ex25.28}}; \code{\link{ex25.29}};
\code{\link{ex25.30}}; \code{\link{ex25.42}};
\code{\link{ex25.43}}; \code{\link{ex25.44}};
\code{\link{ex25.45}}; \code{\link{ex25.48}};
\code{\link{ex25.49}}; \code{\link{ta25.01}}
}
| /man/ex25.25.Rd | no_license | jrnold/bps5data | R | false | false | 1,456 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ex25.25}
\alias{ex25.25}
\title{Data from Exercise 25.25}
\format{\Sexpr[results=rd]{bps5data:::doc_data("ex25.25") }}
\source{
\url{ http://bcs.whfreeman.com/bps5e/content/cat_030/PC-Text.zip }
}
\usage{
data("ex25.25")
}
\description{
Data from Exercise 25.25 of \emph{The Basic Practice of Statistics}, 5th edition.
}
\references{
Moore, David S. 2009. \emph{The Basic Practice of Statistics}. 5th edition. New York: W. H. Freeman.
}
\seealso{
Other datasets from Chapter 25 of \emph{BPS} 5th ed.: \code{\link{eg25.01}};
\code{\link{eg25.05}}; \code{\link{eg25.06}};
\code{\link{eg25.09}}; \code{\link{eg25.10}};
\code{\link{ex25.01}}; \code{\link{ex25.02}};
\code{\link{ex25.05}}; \code{\link{ex25.06}};
\code{\link{ex25.07}}; \code{\link{ex25.08}};
\code{\link{ex25.11}}; \code{\link{ex25.12}};
\code{\link{ex25.13}}; \code{\link{ex25.14}};
\code{\link{ex25.15}}; \code{\link{ex25.16}};
\code{\link{ex25.18}}; \code{\link{ex25.19}};
\code{\link{ex25.21}}; \code{\link{ex25.22}};
\code{\link{ex25.23}}; \code{\link{ex25.24}};
\code{\link{ex25.26}}; \code{\link{ex25.27}};
\code{\link{ex25.28}}; \code{\link{ex25.29}};
\code{\link{ex25.30}}; \code{\link{ex25.42}};
\code{\link{ex25.43}}; \code{\link{ex25.44}};
\code{\link{ex25.45}}; \code{\link{ex25.48}};
\code{\link{ex25.49}}; \code{\link{ta25.01}}
}
|
/readfile.R | no_license | w4560000/RProject2 | R | false | false | 2,816 | r | ||
######################################
# UK AIRPORTS PUNCTUALITY - DATA I/O #
######################################
## PREPARATION ------------------------------------------------------------------------------------------------------------------
# load packages (library() fails fast if a package is missing, unlike the
# original require(), which only warns and lets the script die later)
pkgs <- c('data.table', 'rvest')
invisible(lapply(pkgs, library, character.only = TRUE))
# set constants
data_path <- './uk_airports_punctuality/data'
url_home <- 'https://www.caa.co.uk'
url_pref <- paste0(url_home, '/Data-and-analysis/UK-aviation-market/Flight-reliability/Datasets/Punctuality-data/Punctuality-statistics-')
# load the per-column lookup (CAA column name -> short code, plus a type flag)
# that drives the fread() calls below
vars <- fread(file.path(data_path, 'vars.csv'))
## MAIN DATASET -----------------------------------------------------------------------------------------------------------------
# 0-row template so the final table keeps the expected columns even when no
# file is downloaded
dts <- setnames( data.table(matrix(nrow = 0, ncol = nrow(vars))), vars$code)
# download data: collect each file in a list and bind once at the end
# (calling rbindlist inside the loop copies the growing table every iteration)
chunks <- list()
for(x in 1990:2018){
  # read the list of files linked from the year's statistics page
  fnames <- read_html(paste0(url_pref, x)) %>%
    html_nodes('#ctl00_cphBody_dzMain_uxColumnDisplay_ctl00_uxControlColumn_ctl01_uxWidgetHost_uxUpdatePanel a') %>%
    html_attr('href')
  # keep only the arrival/departure datasets
  fnames <- fnames[grepl('arr|dep', tolower(fnames))]
  for(fn in fnames){
    chunks[[length(chunks) + 1L]] <- fread(paste0(url_home, fn), select = vars$name, col.names = vars$code, na.strings = '')
  }
}
dts <- rbindlist(c(list(dts), chunks))
# convert to factor the columns flagged with type 'f' in vars.csv
cols <- vars[type == 'f', code]
dts[, (cols) := lapply(.SD, factor), .SDcols = cols]
# total flights = matched + unmatched + cancelled
dts[, n_flights := n_matched + n_unmatched + n_cancelled]
# save final dataset
# NOTE(review): no save call exists -- dts is destroyed by rm() below; add
# e.g. saveRDS(dts, file.path(data_path, 'punctuality.rds')) before cleanup
## Clean and Exit ---------------------------------------------------------------------------------------------------------------
rm(list = ls())
gc()
| /uk_airports_punctuality/01-data_io.R | permissive | WeR-stats/workshops | R | false | false | 1,870 | r | ######################################
# UK AIRPORTS PUNCTUALITY - DATA I/O #
######################################
## PREPARATION ------------------------------------------------------------------------------------------------------------------
# load packages
# NOTE(review): require() only warns when a package is missing; library() would fail fast
pkgs <- c('data.table', 'rvest')
invisible(lapply(pkgs, require, character.only = TRUE))
# set constants
data_path <- './uk_airports_punctuality/data'
url_home <- 'https://www.caa.co.uk'
url_pref <- paste0(url_home, '/Data-and-analysis/UK-aviation-market/Flight-reliability/Datasets/Punctuality-data/Punctuality-statistics-')
# load data: per-column lookup (CAA column name -> short code, plus a type flag)
vars <- fread(file.path(data_path, 'vars.csv'))
## MAIN DATASET -----------------------------------------------------------------------------------------------------------------
# create empty dataset: 0-row table whose columns are the short codes in vars$code
dts <- setnames( data.table(matrix(nrow = 0, ncol = nrow(vars))), vars$code)
# download data: one CAA statistics page per year
for(x in 1990:2018){
  # read list of files linked from the year's page
  fnames <- read_html(paste0(url_pref, x)) %>%
    html_nodes('#ctl00_cphBody_dzMain_uxColumnDisplay_ctl00_uxControlColumn_ctl01_uxWidgetHost_uxUpdatePanel a') %>%
    html_attr('href')
  # separates "good" ones: keep only the arrival/departure datasets
  fnames <- fnames[grepl('arr|dep', tolower(fnames))]
  # read all files, and bind them in dataset
  # NOTE(review): growing dts with rbindlist inside the loop copies it each
  # iteration; collecting chunks in a list and binding once would be cheaper
  for(fn in fnames){
    y <- fread(paste0(url_home, fn), select = vars$name, col.names = vars$code, na.strings = '')
    dts <- rbindlist(list( dts, y ))
  }
}
# convert to factor the columns flagged with type 'f' in vars.csv
cols <- vars[type == 'f', code]
dts[, (cols) := lapply(.SD, factor), .SDcols = cols]
# find total flights: matched + unmatched + cancelled
dts[, n_flights := n_matched + n_unmatched + n_cancelled]
# save final dataset
# NOTE(review): no save call follows -- dts is destroyed by rm() below; confirm
## Clean and Exit ---------------------------------------------------------------------------------------------------------------
rm(list = ls())
gc()
#' Numerical integration via the trapezoidal rule
#'
#' Approximates the integral of \code{f} over the interval from \code{a} to
#' \code{b} with a single application of the trapezoidal rule.
#'
#' @param a lower endpoint of the interval
#' @param b upper endpoint of the interval
#' @param f the function to integrate
#'
#' @return The trapezoidal-rule approximation of the integral on the interval
#' @export
#'
#' @details The argument \code{f} must be an R function of one argument,
#' e.g. \code{function(x) x^2}.
#'
#' @examples
#' regla_trapecio(1.5,3,function(x){return(x^2+x^3)})
#' regla_trapecio(2.5,3.14,function(x){return(cos(x))})
#'
#' @seealso \link{regla_punto_medio}, \link{regla_rectangulo}
#'
regla_trapecio <- function(a, b, f) {
  # width of the interval times the average of the endpoint values
  (b - a) * (f(a) + f(b)) / 2
}
| /R/regla_trapecio.R | permissive | JoanBoters/ProyectoFinal | R | false | false | 778 | r | #' Integración mediante la Regla del Trapecio
#'
#' Computes an approximation of the integral of f on the interval
#' from a to b, using the trapezoidal rule.
#'
#' @param a lower endpoint of the interval
#' @param b upper endpoint of the interval
#' @param f the function to integrate
#'
#' @return The approximation of the integral on the interval
#' @export
#'
#' @details The input f must be defined as an R function, i.e.
#' f is of type function.
#'
#' @examples
#' regla_trapecio(1.5,3,function(x){return(x^2+x^3)})
#' regla_trapecio(2.5,3.14,function(x){return(cos(x))})
#'
#' @seealso \link{regla_punto_medio}, \link{regla_rectangulo}
#'
regla_trapecio=function(a,b,f){
# trapezoid area: interval width times the average of the endpoint values
z=(b-a)*(f(a)+f(b))/2
return(z)
}
|
# Logical operations on numeric vectors: non-zero values are treated as TRUE.
x <- c(45,23,155)
y <- c(1,2,0)
koniunkcja <- x & y           # element-wise AND: TRUE TRUE FALSE
alternatywa <- x | y          # element-wise OR:  TRUE TRUE TRUE
# Fix: `x!y` is a syntax error (`!` is unary in R); negation of x was intended.
negacja <- !x                 # FALSE FALSE FALSE (every element is non-zero)
# Fix: since R 4.3, `&&` errors on vectors longer than 1; the name
# ("czy_wszystkie" = "are all?") indicates all() was intended.
czy_wszystkie <- all(x & y)   # FALSE (the last pair is 155 & 0)
czy_chociaz_jeden <- x||y | /zad19.R | no_license | marcin-mulawa/Zadania-R | R | false | false | 130 | r | x <- c(45,23,155)
# (duplicate fragment; x is defined just above)
y <- c(1,2,0)
koniunkcja <-x&y
alternatywa <-x|y
# NOTE(review): `x!y` is not valid R (`!` is unary); `!x` was likely intended.
negacja <- x!y
# NOTE(review): `&&`/`||` error on length > 1 vectors in R >= 4.3;
# all(x & y) / any(x | y) were likely intended given the variable names.
czy_wszystkie <- x&&y
czy_chociaz_jeden <- x||y |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classesAndMethods.R
\docType{class}
\name{markovchain-class}
\alias{markovchain-class}
\alias{*,markovchain,markovchain-method}
\alias{*,markovchain,matrix-method}
\alias{*,markovchain,numeric-method}
\alias{*,matrix,markovchain-method}
\alias{*,numeric,markovchain-method}
\alias{==,markovchain,markovchain-method}
\alias{!=,markovchain,markovchain-method}
\alias{absorbingStates,markovchain-method}
\alias{transientStates,markovchain-method}
\alias{recurrentStates,markovchain-method}
\alias{transientClasses,markovchain-method}
\alias{recurrentClasses,markovchain-method}
\alias{communicatingClasses,markovchain-method}
\alias{steadyStates,markovchain-method}
\alias{conditionalDistribution,markovchain-method}
\alias{hittingProbabilities,markovchain-method}
\alias{canonicForm,markovchain-method}
\alias{coerce,data.frame,markovchain-method}
\alias{coerce,markovchain,data.frame-method}
\alias{coerce,table,markovchain-method}
\alias{coerce,markovchain,igraph-method}
\alias{coerce,markovchain,matrix-method}
\alias{coerce,markovchain,sparseMatrix-method}
\alias{coerce,sparseMatrix,markovchain-method}
\alias{coerce,matrix,markovchain-method}
\alias{coerce,msm,markovchain-method}
\alias{coerce,msm.est,markovchain-method}
\alias{coerce,etm,markovchain-method}
\alias{dim,markovchain-method}
\alias{initialize,markovchain-method}
\alias{names<-,markovchain-method}
\alias{plot,markovchain,missing-method}
\alias{predict,markovchain-method}
\alias{print,markovchain-method}
\alias{show,markovchain-method}
\alias{summary,markovchain-method}
\alias{sort,markovchain-method}
\alias{t,markovchain-method}
\alias{[,markovchain,ANY,ANY,ANY-method}
\alias{^,markovchain,numeric-method}
\title{Markov Chain class}
\arguments{
\item{states}{Name of the states. Must be the same of \code{colnames} and \code{rownames} of the transition matrix}
\item{byrow}{TRUE or FALSE indicating whether the supplied matrix
is either stochastic by rows or by columns}
\item{transitionMatrix}{Square transition matrix}
\item{name}{Optional character name of the Markov chain}
}
\description{
The S4 class that describes \code{markovchain} objects.
}
\note{
\enumerate{
\item \code{markovchain} objects are backed by S4 Classes.
\item A validation method is used to assess whether either the column or the row sums total to one.
Rounding is used up to \code{.Machine$double.eps * 100}. If state names are not properly
defined for a probability \code{matrix}, coercing to a \code{markovchain} object leads
to overriding the state names with an artificial "s1", "s2", ... sequence. In addition, operator
overloading has been applied for the \eqn{+,*,^,==,!=} operators.
}
}
\section{Creation of objects}{
Objects can be created by calls of the form \code{new("markovchain", states, byrow, transitionMatrix, ...)}.
}
\section{Methods}{
\describe{
\item{*}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: multiply two \code{markovchain} objects}
\item{*}{\code{signature(e1 = "markovchain", e2 = "matrix")}: markovchain by matrix multiplication}
\item{*}{\code{signature(e1 = "markovchain", e2 = "numeric")}: markovchain by numeric vector multiplication }
\item{*}{\code{signature(e1 = "matrix", e2 = "markovchain")}: matrix by markov chain}
\item{*}{\code{signature(e1 = "numeric", e2 = "markovchain")}: numeric vector by \code{markovchain} multiplication }
\item{[}{\code{signature(x = "markovchain", i = "ANY", j = "ANY", drop = "ANY")}: ... }
\item{^}{\code{signature(e1 = "markovchain", e2 = "numeric")}: power of a \code{markovchain} object}
\item{==}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: equality of two \code{markovchain} object}
\item{!=}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: non-equality of two \code{markovchain} object}
\item{absorbingStates}{\code{signature(object = "markovchain")}: method to get absorbing states }
\item{canonicForm}{\code{signature(object = "markovchain")}: return a \code{markovchain} object into canonic form }
\item{coerce}{\code{signature(from = "markovchain", to = "data.frame")}: coerce method from markovchain to \code{data.frame}}
\item{conditionalDistribution}{\code{signature(object = "markovchain")}: returns the conditional probability of subsequent states given a state}
\item{coerce}{\code{signature(from = "data.frame", to = "markovchain")}: coerce method from \code{data.frame} to \code{markovchain}}
\item{coerce}{\code{signature(from = "table", to = "markovchain")}: coerce method from \code{table} to \code{markovchain} }
\item{coerce}{\code{signature(from = "msm", to = "markovchain")}: coerce method from \code{msm} to \code{markovchain} }
\item{coerce}{\code{signature(from = "msm.est", to = "markovchain")}: coerce method from \code{msm.est} (but only from a Probability Matrix) to \code{markovchain} }
\item{coerce}{\code{signature(from = "etm", to = "markovchain")}: coerce method from \code{etm} to \code{markovchain} }
\item{coerce}{\code{signature(from = "sparseMatrix", to = "markovchain")}: coerce method from \code{sparseMatrix} to \code{markovchain} }
\item{coerce}{\code{signature(from = "markovchain", to = "igraph")}: coercing to \code{igraph} objects }
\item{coerce}{\code{signature(from = "markovchain", to = "matrix")}: coercing to \code{matrix} objects }
\item{coerce}{\code{signature(from = "markovchain", to = "sparseMatrix")}: coercing to \code{sparseMatrix} objects }
\item{coerce}{\code{signature(from = "matrix", to = "markovchain")}: coercing to \code{markovchain} objects from \code{matrix} one }
\item{dim}{\code{signature(x = "markovchain")}: method to get the size}
\item{names}{\code{signature(x = "markovchain")}: method to get the names of states}
\item{names<-}{\code{signature(x = "markovchain", value = "character")}: method to set the names of states}
\item{initialize}{\code{signature(.Object = "markovchain")}: initialize method }
\item{plot}{\code{signature(x = "markovchain", y = "missing")}: plot method for \code{markovchain} objects }
\item{predict}{\code{signature(object = "markovchain")}: predict method }
\item{print}{\code{signature(x = "markovchain")}: print method. }
\item{show}{\code{signature(object = "markovchain")}: show method. }
\item{sort}{\code{signature(x = "markovchain", decreasing=FALSE)}: sorting the transition matrix. }
\item{states}{\code{signature(object = "markovchain")}: returns the names of states (as \code{names}. }
\item{steadyStates}{\code{signature(object = "markovchain")}: method to get the steady vector. }
\item{summary}{\code{signature(object = "markovchain")}: method to summarize structure of the markov chain }
\item{transientStates}{\code{signature(object = "markovchain")}: method to get the transient states. }
\item{t}{\code{signature(x = "markovchain")}: transpose matrix }
\item{transitionProbability}{\code{signature(object = "markovchain")}: transition probability }
}
}
\examples{
#show markovchain definition
showClass("markovchain")
#create a simple Markov chain
transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE)
simpleMc<-new("markovchain", states=c("a","b"),
transitionMatrix=transMatr,
name="simpleMc")
#power
simpleMc^4
#some methods
steadyStates(simpleMc)
absorbingStates(simpleMc)
simpleMc[2,1]
t(simpleMc)
is.irreducible(simpleMc)
#conditional distributions
conditionalDistribution(simpleMc, "b")
#example for predict method
sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a")
mcFit<-markovchainFit(data=sequence)
predict(mcFit$estimate, newdata="b",n.ahead=3)
#direct conversion
myMc<-as(transMatr, "markovchain")
#example of summary
summary(simpleMc)
\dontrun{plot(simpleMc)}
}
\references{
A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010
}
\seealso{
\code{\link{markovchainSequence}},\code{\link{markovchainFit}}
}
\author{
Giorgio Spedicato
}
\keyword{classes}
| /man/markovchain-class.Rd | no_license | tintinthong/markovchain | R | false | true | 8,041 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classesAndMethods.R
\docType{class}
\name{markovchain-class}
\alias{markovchain-class}
\alias{*,markovchain,markovchain-method}
\alias{*,markovchain,matrix-method}
\alias{*,markovchain,numeric-method}
\alias{*,matrix,markovchain-method}
\alias{*,numeric,markovchain-method}
\alias{==,markovchain,markovchain-method}
\alias{!=,markovchain,markovchain-method}
\alias{absorbingStates,markovchain-method}
\alias{transientStates,markovchain-method}
\alias{recurrentStates,markovchain-method}
\alias{transientClasses,markovchain-method}
\alias{recurrentClasses,markovchain-method}
\alias{communicatingClasses,markovchain-method}
\alias{steadyStates,markovchain-method}
\alias{conditionalDistribution,markovchain-method}
\alias{hittingProbabilities,markovchain-method}
\alias{canonicForm,markovchain-method}
\alias{coerce,data.frame,markovchain-method}
\alias{coerce,markovchain,data.frame-method}
\alias{coerce,table,markovchain-method}
\alias{coerce,markovchain,igraph-method}
\alias{coerce,markovchain,matrix-method}
\alias{coerce,markovchain,sparseMatrix-method}
\alias{coerce,sparseMatrix,markovchain-method}
\alias{coerce,matrix,markovchain-method}
\alias{coerce,msm,markovchain-method}
\alias{coerce,msm.est,markovchain-method}
\alias{coerce,etm,markovchain-method}
\alias{dim,markovchain-method}
\alias{initialize,markovchain-method}
\alias{names<-,markovchain-method}
\alias{plot,markovchain,missing-method}
\alias{predict,markovchain-method}
\alias{print,markovchain-method}
\alias{show,markovchain-method}
\alias{summary,markovchain-method}
\alias{sort,markovchain-method}
\alias{t,markovchain-method}
\alias{[,markovchain,ANY,ANY,ANY-method}
\alias{^,markovchain,numeric-method}
\title{Markov Chain class}
\arguments{
\item{states}{Name of the states. Must be the same of \code{colnames} and \code{rownames} of the transition matrix}
\item{byrow}{TRUE or FALSE indicating whether the supplied matrix
is either stochastic by rows or by columns}
\item{transitionMatrix}{Square transition matrix}
\item{name}{Optional character name of the Markov chain}
}
\description{
The S4 class that describes \code{markovchain} objects.
}
\note{
\enumerate{
\item \code{markovchain} object are backed by S4 Classes.
\item Validation method is used to assess whether either columns or rows totals to one.
Rounding is used up to \code{.Machine$double.eps * 100}. If state names are not properly
defined for a probability \code{matrix}, coercing to \code{markovhcain} object leads
to overriding states name with artificial "s1", "s2", ... sequence. In addition, operator
overloading has been applied for \eqn{+,*,^,==,!=} operators.
}
}
\section{Creation of objects}{
Objects can be created by calls of the form \code{new("markovchain", states, byrow, transitionMatrix, ...)}.
}
\section{Methods}{
\describe{
\item{*}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: multiply two \code{markovchain} objects}
\item{*}{\code{signature(e1 = "markovchain", e2 = "matrix")}: markovchain by matrix multiplication}
\item{*}{\code{signature(e1 = "markovchain", e2 = "numeric")}: markovchain by numeric vector multiplication }
\item{*}{\code{signature(e1 = "matrix", e2 = "markovchain")}: matrix by markov chain}
\item{*}{\code{signature(e1 = "numeric", e2 = "markovchain")}: numeric vector by \code{markovchain} multiplication }
\item{[}{\code{signature(x = "markovchain", i = "ANY", j = "ANY", drop = "ANY")}: ... }
\item{^}{\code{signature(e1 = "markovchain", e2 = "numeric")}: power of a \code{markovchain} object}
\item{==}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: equality of two \code{markovchain} object}
\item{!=}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: non-equality of two \code{markovchain} object}
\item{absorbingStates}{\code{signature(object = "markovchain")}: method to get absorbing states }
\item{canonicForm}{\code{signature(object = "markovchain")}: return a \code{markovchain} object into canonic form }
\item{coerce}{\code{signature(from = "markovchain", to = "data.frame")}: coerce method from markovchain to \code{data.frame}}
\item{conditionalDistribution}{\code{signature(object = "markovchain")}: returns the conditional probability of subsequent states given a state}
\item{coerce}{\code{signature(from = "data.frame", to = "markovchain")}: coerce method from \code{data.frame} to \code{markovchain}}
\item{coerce}{\code{signature(from = "table", to = "markovchain")}: coerce method from \code{table} to \code{markovchain} }
\item{coerce}{\code{signature(from = "msm", to = "markovchain")}: coerce method from \code{msm} to \code{markovchain} }
\item{coerce}{\code{signature(from = "msm.est", to = "markovchain")}: coerce method from \code{msm.est} (but only from a Probability Matrix) to \code{markovchain} }
\item{coerce}{\code{signature(from = "etm", to = "markovchain")}: coerce method from \code{etm} to \code{markovchain} }
\item{coerce}{\code{signature(from = "sparseMatrix", to = "markovchain")}: coerce method from \code{sparseMatrix} to \code{markovchain} }
\item{coerce}{\code{signature(from = "markovchain", to = "igraph")}: coercing to \code{igraph} objects }
\item{coerce}{\code{signature(from = "markovchain", to = "matrix")}: coercing to \code{matrix} objects }
\item{coerce}{\code{signature(from = "markovchain", to = "sparseMatrix")}: coercing to \code{sparseMatrix} objects }
\item{coerce}{\code{signature(from = "matrix", to = "markovchain")}: coercing to \code{markovchain} objects from \code{matrix} one }
\item{dim}{\code{signature(x = "markovchain")}: method to get the size}
\item{names}{\code{signature(x = "markovchain")}: method to get the names of states}
\item{names<-}{\code{signature(x = "markovchain", value = "character")}: method to set the names of states}
\item{initialize}{\code{signature(.Object = "markovchain")}: initialize method }
\item{plot}{\code{signature(x = "markovchain", y = "missing")}: plot method for \code{markovchain} objects }
\item{predict}{\code{signature(object = "markovchain")}: predict method }
\item{print}{\code{signature(x = "markovchain")}: print method. }
\item{show}{\code{signature(object = "markovchain")}: show method. }
\item{sort}{\code{signature(x = "markovchain", decreasing=FALSE)}: sorting the transition matrix. }
\item{states}{\code{signature(object = "markovchain")}: returns the names of states (as \code{names}. }
\item{steadyStates}{\code{signature(object = "markovchain")}: method to get the steady vector. }
\item{summary}{\code{signature(object = "markovchain")}: method to summarize structure of the markov chain }
\item{transientStates}{\code{signature(object = "markovchain")}: method to get the transient states. }
\item{t}{\code{signature(x = "markovchain")}: transpose matrix }
\item{transitionProbability}{\code{signature(object = "markovchain")}: transition probability }
}
}
\examples{
#show markovchain definition
showClass("markovchain")
#create a simple Markov chain
transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE)
simpleMc<-new("markovchain", states=c("a","b"),
transitionMatrix=transMatr,
name="simpleMc")
#power
simpleMc^4
#some methods
steadyStates(simpleMc)
absorbingStates(simpleMc)
simpleMc[2,1]
t(simpleMc)
is.irreducible(simpleMc)
#conditional distributions
conditionalDistribution(simpleMc, "b")
#example for predict method
sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a")
mcFit<-markovchainFit(data=sequence)
predict(mcFit$estimate, newdata="b",n.ahead=3)
#direct conversion
myMc<-as(transMatr, "markovchain")
#example of summary
summary(simpleMc)
\dontrun{plot(simpleMc)}
}
\references{
A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010
}
\seealso{
\code{\link{markovchainSequence}},\code{\link{markovchainFit}}
}
\author{
Giorgio Spedicato
}
\keyword{classes}
|
# JAGS model for a linear quantile mixed model (LQMM) with gamma priors on the
# random-effect precisions (equivalently, inverse-gamma priors on the
# variances -- hence the "IG" suffix). The body is BUGS/JAGS model code, not
# executable R: the function is only a container whose body is handed to JAGS,
# and tau, I, offset, y, X, U, ncX, ncU and the prior hyperparameters are
# supplied as data by the caller.
jags_lqmm_IG <-
function () {
    # constants of the normal-exponential mixture representation of the
    # asymmetric Laplace distribution at quantile level tau
    c1 <- (1-2*tau)/(tau*(1-tau))
    c2 <- 2/(tau*(1-tau))
    # likelihood
    for (i in 1:I){
      # longitudinal part: rows offset[i] .. offset[i+1]-1 belong to subject i
      for(j in offset[i]:(offset[i+1]-1)){
        y[j] ~ dnorm(mu[j], prec[j])
        va1[j] ~ dexp(1/sigma)            # latent exponential mixing variable
        prec[j] <- 1/(sigma*c2*va1[j])    # JAGS dnorm is parameterised by precision
        mu[j] <- inprod(beta[1:ncX], X[j, 1:ncX]) + inprod(b[i, 1:ncU], U[j, 1:ncU]) + c1*va1[j]
      }#end of j loop
      # random effects: independent normals, one precision per component
      for(r in 1:ncU){
        b[i,r] ~ dnorm(0, prec.Sigma2[r])
      }
    }#end of i loop
    # priors for parameters
    for(rr in 1:ncU){
      prec.Sigma2[rr] ~ dgamma(priorA.Sigma2, priorB.Sigma2)  # gamma on precision <=> IG on variance
      covariance.b[rr] <- 1/prec.Sigma2[rr]                   # implied random-effect variance (monitored)
    }
    beta[1:ncX] ~ dmnorm(priorMean.beta[], priorTau.beta[, ]) # multivariate-normal prior on fixed effects
    sigma ~ dgamma(priorA.sigma, priorB.sigma)                # scale of the asymmetric Laplace
  }
| /R/jags_lqmm_IG.R | no_license | AntoineBbi/BQt | R | false | false | 782 | r | jags_lqmm_IG <-
function () {
# constants
c1 <- (1-2*tau)/(tau*(1-tau))
c2 <- 2/(tau*(1-tau))
# likelihood
for (i in 1:I){
# longitudinal part
for(j in offset[i]:(offset[i+1]-1)){
y[j] ~ dnorm(mu[j], prec[j])
va1[j] ~ dexp(1/sigma)
prec[j] <- 1/(sigma*c2*va1[j])
mu[j] <- inprod(beta[1:ncX], X[j, 1:ncX]) + inprod(b[i, 1:ncU], U[j, 1:ncU]) + c1*va1[j]
}#end of j loop
# random effects
for(r in 1:ncU){
b[i,r] ~ dnorm(0, prec.Sigma2[r])
}
}#end of i loop
# priors for parameters
for(rr in 1:ncU){
prec.Sigma2[rr] ~ dgamma(priorA.Sigma2, priorB.Sigma2)
covariance.b[rr] <- 1/prec.Sigma2[rr]
}
beta[1:ncX] ~ dmnorm(priorMean.beta[], priorTau.beta[, ])
sigma ~ dgamma(priorA.sigma, priorB.sigma)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rw_Metro_R.R
\name{rw_Metro_R}
\alias{rw_Metro_R}
\title{Random Walk Metropolis sampler using R}
\usage{
rw_Metro_R(sigma, x0, N, f)
}
\arguments{
\item{sigma}{the standard deviation of the normal random increment with a zero mean}
\item{x0}{the initial iteration point}
\item{N}{the designated number of random numbers (including the initial point x0)}
\item{f}{the density of target distribution}
}
\value{
a list of length 3, x the random numbers of size \code{N}, k the number of rejection times, acceptance_rate the acceptance rate of the candidate points
}
\description{
A Random Walk Metropolis sampler using R
}
\examples{
\dontrun{
laplace<-function(x) return(1/2*exp(-abs(x)))
lapR <- rw_Metro_R(2,25,2000,laplace);
plot(1:2000,lapR$x,type='l')
abline(h=c(-3*sqrt(2),3*sqrt(2)))
}
}
| /man/rw_Metro_R.Rd | no_license | Daniel-1997127/SC19085 | R | false | true | 874 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rw_Metro_R.R
\name{rw_Metro_R}
\alias{rw_Metro_R}
\title{Random Walk Metropolis sampler using R}
\usage{
rw_Metro_R(sigma, x0, N, f)
}
\arguments{
\item{sigma}{the standard deviation of the normal random increment with a zero mean}
\item{x0}{the initial iteration point}
\item{N}{the designated number of random numbers (including the initial point x0)}
\item{f}{the density of target distribution}
}
\value{
a list of length 3, x the random numbers of size \code{N}, k the number of rejection times, acceptance_rate the acceptance rate of the candidate points
}
\description{
A Random Walk Metropolis sampler using R
}
\examples{
\dontrun{
laplace<-function(x) return(1/2*exp(-abs(x)))
lapR <- rw_Metro_R(2,25,2000,laplace);
plot(1:2000,lapR$x,type='l')
abline(h=c(-3*sqrt(2),3*sqrt(2)))
}
}
|
#' Parse tracking data for one team ( WIP )
#'
#' Reads a tracking-data CSV (column names on the third file line, data from
#' the fourth), forward-fills the merged header cells, renames the player and
#' ball coordinate columns, and rescales the raw coordinates onto a pitch of
#' nXSpan x nYSpan units centred on the origin; y coordinates are flipped.
#' NOTE(review): the previous header described a pass-network plot and did
#' not match this function.
#'
#' @param cFilePath Path to the tracking-data CSV file.
#' @param cTag Prefix added to the player/ball column names (e.g. a team tag).
#' @param nXSpan Output pitch length; rescaled x lies in [-nXSpan/2, nXSpan/2].
#' @param nYSpan Output pitch width; rescaled y lies in [-nYSpan/2, nYSpan/2].
#' @param xMaxBB Maximum x of the raw coordinates' bounding box.
#' @param yMaxBB Maximum y of the raw coordinates' bounding box.
#' @examples
#' @import data.table
#' @import zoo
#' @export
fParseTrackingDataOneTeam = function (
   cFilePath,
   cTag = '',
   nXSpan = 120,
   nYSpan = 80,
   xMaxBB = 1,
   yMaxBB = 1
) {
   # Data rows start on the fourth line of the file.
   dtRawData = fread(
      cFilePath,
      skip = 3,
      header = F
   )
   # The third line of the file holds the (sparse) column headers.
   dtMetaData = fread(
      cFilePath,
      skip = 2,
      nrows = 1,
      header = F
   )
   # Forward-fill the merged header cells so each coordinate column
   # inherits its player name.
   vcColnames = na.locf(unlist(dtMetaData))
   rm(dtMetaData)
   vcColnames[vcColnames == 'Time [s]'] = 'Time_s'
   # Prefix the player/ball columns with cTag and append alternating
   # X/Y suffixes (each entity has one x and one y column).
   vcColnames[
      grepl(
         vcColnames,
         pattern = 'Player|Ball'
      )
   ] = paste0(
      cTag,
      paste0(
         grep(
            vcColnames[
               grepl(
                  vcColnames,
                  pattern = 'Player|Ball'
               )
            ],
            pattern = 'Player|Ball',
            value = T
         ),
         c('X','Y')
      )
   )
   # The ball is treated as "Player0".
   vcColnames = gsub(
      vcColnames,
      pattern = 'Ball',
      replacement = 'Player0'
   )
   setnames(
      dtRawData,
      vcColnames
   )
   # Rescale raw coordinates from [0, xMaxBB] x [0, yMaxBB] to a pitch
   # centred on (0, 0), column by column (in place via data.table :=).
   for ( cColname in colnames(dtRawData) ) {
      if ( grepl(cColname, pattern = 'X$') ) {
         dtRawData[,
            (cColname) := ( dtRawData[, cColname, with = F] * nXSpan / xMaxBB ) - ( nXSpan * 0.5 )
         ]
      }
      # y coordinates are flipped (raw y grows downward)
      if ( grepl(cColname, pattern = 'Y$') ) {
         dtRawData[,
            (cColname) := ( ( yMaxBB - dtRawData[, cColname, with = F] ) * nYSpan / yMaxBB ) - ( nYSpan * 0.5 )
         ]
      }
   }
   dtRawData
}
| /R/fParseTrackingDataOneTeam.R | permissive | thecomeonman/CodaBonito | R | false | false | 1,764 | r | #' Pass network ( WIP )
#'
#' Plots a marker for each player at their median passing position, and draws
#' connections between players to represent the number of passes exchanged
#' between them
#'
#' @param cFilePath
#' @param cTag
#' @examples
#' @import data.table
#' @import zoo
#' @export
fParseTrackingDataOneTeam = function (
cFilePath,
cTag = '',
nXSpan = 120,
nYSpan = 80,
xMaxBB = 1,
yMaxBB = 1
) {
dtRawData = fread(
cFilePath,
skip = 3,
header = F
)
dtMetaData = fread(
cFilePath,
skip = 2,
nrows = 1,
header = F
)
vcColnames = na.locf(unlist(dtMetaData))
rm(dtMetaData)
vcColnames[vcColnames == 'Time [s]'] = 'Time_s'
vcColnames[
grepl(
vcColnames,
pattern = 'Player|Ball'
)
] = paste0(
cTag,
paste0(
grep(
vcColnames[
grepl(
vcColnames,
pattern = 'Player|Ball'
)
],
pattern = 'Player|Ball',
value = T
),
c('X','Y')
)
)
vcColnames = gsub(
vcColnames,
pattern = 'Ball',
replacement = 'Player0'
)
setnames(
dtRawData,
vcColnames
)
for ( cColname in colnames(dtRawData) ) {
if ( grepl(cColname, pattern = 'X$') ) {
dtRawData[,
(cColname) := ( dtRawData[, cColname, with = F] * nXSpan / xMaxBB ) - ( nXSpan * 0.5 )
]
}
# y coordinates are flipped
if ( grepl(cColname, pattern = 'Y$') ) {
dtRawData[,
(cColname) := ( ( yMaxBB - dtRawData[, cColname, with = F] ) * nYSpan / yMaxBB ) - ( nYSpan * 0.5 )
]
}
}
dtRawData
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{spp583}
\alias{spp583}
\title{species function}
\usage{
spp583(a, b, c, d, e)
}
\arguments{
\item{a}{environmental parameter}
\item{b}{environmental parameter}
\item{c}{environmental parameter}
\item{d}{environmental parameter}
\item{e}{environmental parameter}
}
\description{
species function
}
\examples{
spp583()
}
\keyword{function}
\keyword{species}
| /man/spp583.Rd | permissive | Djeppschmidt/Model.Microbiome | R | false | true | 456 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{spp583}
\alias{spp583}
\title{species function}
\usage{
spp583(a, b, c, d, e)
}
\arguments{
\item{a}{environmental parameter}
\item{b}{environmental parameter}
\item{c}{environmental parameter}
\item{d}{environmental parameter}
\item{e}{environmental parameter}
}
\description{
species function
}
\examples{
spp583()
}
\keyword{function}
\keyword{species}
|
#' Presidential General Election Results
#'
#' @description 2016 Presidential General Election Results for each state.
#'
#' @format A data frame with 833 rows and 10 columns, providing information for results in each state :
#' \describe{
#'   \item{cand_id}{A code assigned to a candidate by the FEC}
#'   \item{state}{The state of the votes.}
#'   \item{general_election_date}{The election date.}
#'   \item{first_name}{First name of the candidate}
#'   \item{last_name}{Last name of the candidate}
#'   \item{last_name_first}{First and last name of the candidate}
#'   \item{party}{Party of the candidate}
#'   \item{general_results}{The number of votes received in that state.}
#'   \item{general_percent}{The percent of the vote received in that state.}
#'   \item{winner_indicator}{"W" if the candidate won; otherwise NA}
#'}
#' @source <https://transition.fec.gov/general/FederalElections2016.shtml>, last edited Feb 2018
"results"
| /R/results.R | no_license | dA505819/fec16 | R | false | false | 917 | r | #' Presidential General Election Results
#'
#' @description 2016 Presidential General Election Results for each state.
#'
#' @format A data frame with 833 rows and 10 columns, providing information for results in each state :
#' \describe{
#' \item{cand_id}{A code assigned to a candidate by the FEC}
#' \item{state}{The state of the votes.}
#' \item{general_election_date}{The election date.}
#' \item{first_name}{First name of candidate}
#' \item{last_name}{Last name of candidate}
#' \item{last_name_first}{First and last name of candidate}
#' \item{party}{Party of the candidates}
#' \item{general_results}{The number of people voted in that state.}
#' \item{general_percent}{The percent of people }
#' \item{winner_indicator}{If candidate won, then it's "W", other wise it is NA}
#'}
#' @source <https://transition.fec.gov/general/FederalElections2016.shtml>, last editted Feb 2018
"results"
|
# Compute, for each of the 332 monitor files "001.csv" ... "332.csv" inside
# `directory`, the correlation between the second and third data columns,
# keeping only monitors whose number of completely observed rows (both
# columns non-NA) exceeds `threshold`.
#
# Args:
#   directory: path to the folder containing the monitor CSV files.
#   threshold: minimum count of complete rows a monitor must exceed.
# Returns: a numeric vector of correlations; numeric(0) if none qualify.
corr <- function(directory, threshold = 0) {
  correlations <- NULL
  for (i in 1:332) {
    # Build the path from the `directory` argument (the original ignored it
    # and used a hard-coded absolute path); sprintf replaces stringr::str_pad,
    # which was never loaded in this file.
    path <- file.path(directory, sprintf("%03d.csv", i))
    monitor <- read.csv(path, header = TRUE)
    n_complete <- sum(!is.na(monitor[, 2]) & !is.na(monitor[, 3]))
    if (n_complete > threshold) {
      correlations <- c(correlations,
                        cor(monitor[, 2], monitor[, 3], use = "complete.obs"))
    }
  }
  # Return an empty numeric vector rather than NULL when nothing qualifies
  # (the original assigned `result <- numeric(1)` but still returned NULL).
  if (is.null(correlations)) {
    correlations <- numeric(0)
  }
  correlations
}
#paste all together, then find mean | /corr.R | no_license | 1002221/datasciencecoursera | R | false | false | 531 | r | corr<-function(directory,threshold=0){
correlations=NULL
for(i in 1:332){
Path = paste("/Users/meg116/datasciencecoursera/specdata/",str_pad(i,3,pad="0"),".csv",sep="")
MyData <- read.csv(file=Path,header=TRUE,sep=",")
if(dim(subset(MyData,(!is.na(MyData[,2]))&(!is.na(MyData[,3]))))[1]>threshold)
correlations<-c(correlations,cor(MyData[,2],MyData[,3],use="complete.obs"))
}
if(is.null(correlations))
result<-numeric(1)
correlations
}
#paste all together, then find mean |
## Put comments here that give an overall description of what your
## functions do - yossi
## makeCacheMatrix: wrap a matrix together with a cache for its inverse.
## Returns a list of four accessors: set/get for the matrix itself, and
## setmatrix/getmatrix for the cached inverse. Changing the matrix via
## set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL
  set <- function(new_matrix) {
    # Replace the stored matrix and drop any stale cached inverse.
    x <<- new_matrix
    inverse_cache <<- NULL
  }
  get <- function() {
    x
  }
  setmatrix <- function(inverse) {
    inverse_cache <<- inverse
  }
  getmatrix <- function() {
    inverse_cache
  }
  list(set = set,
       get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## cacheSolve: return the inverse of the matrix held in `x` (an object made
## by makeCacheMatrix). If an inverse is already cached it is returned
## directly; otherwise it is computed with solve(), stored in the cache,
## and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setmatrix(inverse)
  inverse
}
| /cachematrix.R | no_license | taucg/ProgrammingAssignment2 | R | false | false | 712 | r | ## Put comments here that give an overall description of what your
## functions do - yossi
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
m<-NULL
set<-function(y){
x<<-y
m<<-NULL
}
get<-function() x
setmatrix<-function(solve) m<<- solve
getmatrix<-function() m
list(set=set, get=get,
setmatrix=setmatrix,
getmatrix=getmatrix)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
m<-x$getmatrix()
if(!is.null(m)){
message("getting cached data")
return(m)
}
matrix<-x$get()
m<-solve(matrix, ...)
x$setmatrix(m)
m
## Return a matrix that is the inverse of 'x'
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/week.r
\name{week_stamp}
\alias{week_stamp}
\title{Compute a "Week" stamp (like timestamp) : week number from 1970-01-01}
\usage{
week_stamp(yw)
}
\arguments{
\item{yw}{yearweek value}
}
\description{
This provide a continuous index for week number which not depend on period bound (unlike make_week_index)
This index is usefull to compute on weekly data (+1 is always the next week) without using date and to plotting
weekly data
It is usefull to plot week based data.
}
| /man/week_stamp.Rd | no_license | cturbelin/ifnBase | R | false | true | 550 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/week.r
\name{week_stamp}
\alias{week_stamp}
\title{Compute a "Week" stamp (like timestamp) : week number from 1970-01-01}
\usage{
week_stamp(yw)
}
\arguments{
\item{yw}{yearweek value}
}
\description{
This provide a continuous index for week number which not depend on period bound (unlike make_week_index)
This index is usefull to compute on weekly data (+1 is always the next week) without using date and to plotting
weekly data
It is usefull to plot week based data.
}
|
# Section: compare the empirical density of simulated Ito stock returns
# with the theoretical normal density N(0, sigma^2 * dt), and export the
# figure via tikzDevice.
library(RandomWalk)
library(StockPriceSimulator)
library(ggplot2)
library(grid)
library(gridExtra)
# Number of sub-intervals per unit of time; dt = 1 / scale (see `interval`).
scale <- 10000
return <- sstock_return_ito(scale = scale, sigma = 0.2)
# NOTE(review): rand_walk_t is never used below -- confirm it can be removed.
rand_walk_t <- trwalkGenerator(time_to_maturity = 4,
                               scale = 100)
interval <- 1/scale
# Empirical return density (shaded area) overlaid with the normal curve of
# mean 0 and standard deviation sigma * sqrt(dt).
p <- ggplot(data.frame(Return = return)) +
  stat_density(aes(x = Return),
               geom = "area",
               alpha = 0.5,
               fill = "steelblue") +
  ggplot2::stat_function(fun = dnorm,
                         color = "black",
                         args = list(mean = 0,
                                     sd = 0.2 * sqrt(interval))) +
  theme(legend.position = 'none',
        axis.title = element_text(size = rel(0.8)),
        axis.text = element_text(size = rel(0.7)),
        plot.title = element_text(size = rel(0.8))) +
  labs( x = 'Daily Stock Return',
        y = 'Density')
# Render the plot into a LaTeX/TikZ figure file.
tikzDevice::tikz(file = "figures/stock_return_density.tex", width = 6, height = 3)
p
dev.off()
################################################################################
# Log-return: simulate 5000 terminal prices, compare the empirical density of
# log(S_T / S_0) with the theoretical normal N(-sigma^2/2 * t, sigma^2 * t),
# and export two sizes of the figure.
################################################################################
library(RandomWalk)
library(StockPriceSimulator)
library(ggplot2)
library(grid)
library(gridExtra)
library(purrr)
scale <- 500
# One simulated path per seed in n; only the terminal price is kept.
n <- 1:5000
t <- 1
s0 <- 50
sigma <- 0.3
a <- 0 # alpha
stock <- unlist(map(n, ~sstock(initial_stock_price = s0, time_to_maturity = t,
                               sigma = sigma,
                               seed = .x,
                               scale = scale)$stock_price_path[scale * t + 1]))
log_return <- log(stock / s0)
# Empirical log-return density vs its theoretical normal density.
p <- ggplot(data.frame(return = log_return)) +
  stat_density(aes(x = return),
               geom = "area",
               alpha = 0.5,
               fill = "steelblue") +
  ggplot2::stat_function(fun = dnorm,
                         color = "black",
                         args = list(mean = -sigma ^2 / 2,
                                     sd = sigma * sqrt(t))) +
  theme(legend.position = 'none',
        axis.title = element_text(size = rel(0.8)),
        axis.text = element_text(size = rel(0.7)),
        plot.title = element_text(size = rel(0.8))) +
  labs( x = 'Log-return',
        y = 'Density')
p
# Export the same figure at two sizes.
tikzDevice::tikz(file = "figures/stock_logreturn_density.tex", width = 6, height = 3)
p
dev.off()
tikzDevice::tikz(file = "figures/stock_logreturn_density2.tex", width = 4, height = 2)
p
dev.off()
# Section: daily-scale simulation (365 steps/year) comparing the simulated
# log-return density against another density `u`, then a log-normal overlay
# of the terminal stock price.
scale <- 365
n <- 1:5000
t <- 1
s0 <- 115.08
# NOTE(review): `alpha` is not defined anywhere in this script (the previous
# section used `a <- 0 # alpha`); this line errors unless `alpha` already
# exists in the workspace.
a <- alpha
stock <- unlist(map(n, ~sstock(initial_stock_price = s0, time_to_maturity = t,
                               sigma = sigma,
                               seed = .x,
                               scale = scale)$stock_price_path[scale * t + 1]))
log_return <- log(stock / s0)
p <- ggplot(data.frame(return = log_return)) +
  stat_density(aes(x = return),
               geom = "line",
               alpha = 0.5,
               colour = "darkred") +
  # NOTE(review): `u` is not defined in this script; data.frame(u) fails
  # unless `u` (presumably observed/real log-returns) exists in the workspace.
  stat_density(data = data.frame(u), aes(u),
               geom = "line",
               colour = 'steelblue')+
  theme(legend.position = 'none',
        axis.title = element_text(size = rel(0.8)),
        axis.text = element_text(size = rel(0.7)),
        plot.title = element_text(size = rel(0.8))) +
  labs( x = 'Log-return',
        y = 'Density')
tikzDevice::tikz(file = "figures/realvsempirical2.tex", width = 4, height = 2)
p
dev.off()
################################################################################
# Log-normal stock return: overlay the terminal-price density with the
# log-normal density implied by the model parameters.
################################################################################
# NOTE(review): `delta` is computed but never used below.
delta <- s0 ^2 * exp(2 * a * t) * (exp(sigma ^2 * t) - 1)
# NOTE(review): mu uses the hard-coded log(50) although s0 is 115.08 in this
# section -- likely a leftover from the previous section; verify.
mu <- log(50) + (a - sigma ^2 / 2) * t
p <- ggplot(data.frame(stock = stock)) +
  stat_density(aes(x = stock),
               geom = "area",
               alpha = 0.5,
               fill = "steelblue") +
  ggplot2::stat_function(fun = dlnorm,
                         color = "black",
                         args = list(meanlog = mu,
                                     sdlog = sigma)) +
  theme(legend.position = 'none',
        axis.title = element_text(size = rel(0.8)),
        axis.text = element_text(size = rel(0.7)),
        plot.title = element_text(size = rel(0.8))) +
  labs( x = 'Daily Stock Return',
        y = 'Density')
tikzDevice::tikz(file = "figures/stock_log_density.tex", width = 6, height = 3)
p
dev.off()
| /analysis/stock_return_ito_density.R | no_license | AnthonyTedde/thesisDoc | R | false | false | 4,483 | r | library(RandomWalk)
library(StockPriceSimulator)
library(ggplot2)
library(grid)
library(gridExtra)
scale <- 10000
return <- sstock_return_ito(scale = scale, sigma = 0.2)
rand_walk_t <- trwalkGenerator(time_to_maturity = 4,
scale = 100)
interval <- 1/scale
p <- ggplot(data.frame(Return = return)) +
stat_density(aes(x = Return),
geom = "area",
alpha = 0.5,
fill = "steelblue") +
ggplot2::stat_function(fun = dnorm,
color = "black",
args = list(mean = 0,
sd = 0.2 * sqrt(interval))) +
theme(legend.position = 'none',
axis.title = element_text(size = rel(0.8)),
axis.text = element_text(size = rel(0.7)),
plot.title = element_text(size = rel(0.8))) +
labs( x = 'Daily Stock Return',
y = 'Density')
tikzDevice::tikz(file = "figures/stock_return_density.tex", width = 6, height = 3)
p
dev.off()
################################################################################
# Log---return
################################################################################
library(RandomWalk)
library(StockPriceSimulator)
library(ggplot2)
library(grid)
library(gridExtra)
library(purrr)
scale <- 500
n <- 1:5000
t <- 1
s0 <- 50
sigma <- 0.3
a <- 0 # alpha
stock <- unlist(map(n, ~sstock(initial_stock_price = s0, time_to_maturity = t,
sigma = sigma,
seed = .x,
scale = scale)$stock_price_path[scale * t + 1]))
log_return <- log(stock / s0)
p <- ggplot(data.frame(return = log_return)) +
stat_density(aes(x = return),
geom = "area",
alpha = 0.5,
fill = "steelblue") +
ggplot2::stat_function(fun = dnorm,
color = "black",
args = list(mean = -sigma ^2 / 2,
sd = sigma * sqrt(t))) +
theme(legend.position = 'none',
axis.title = element_text(size = rel(0.8)),
axis.text = element_text(size = rel(0.7)),
plot.title = element_text(size = rel(0.8))) +
labs( x = 'Log-return',
y = 'Density')
p
tikzDevice::tikz(file = "figures/stock_logreturn_density.tex", width = 6, height = 3)
p
dev.off()
tikzDevice::tikz(file = "figures/stock_logreturn_density2.tex", width = 4, height = 2)
p
dev.off()
scale <- 365
n <- 1:5000
t <- 1
s0 <- 115.08
a <- alpha
stock <- unlist(map(n, ~sstock(initial_stock_price = s0, time_to_maturity = t,
sigma = sigma,
seed = .x,
scale = scale)$stock_price_path[scale * t + 1]))
log_return <- log(stock / s0)
p <- ggplot(data.frame(return = log_return)) +
stat_density(aes(x = return),
geom = "line",
alpha = 0.5,
colour = "darkred") +
stat_density(data = data.frame(u), aes(u),
geom = "line",
colour = 'steelblue')+
theme(legend.position = 'none',
axis.title = element_text(size = rel(0.8)),
axis.text = element_text(size = rel(0.7)),
plot.title = element_text(size = rel(0.8))) +
labs( x = 'Log-return',
y = 'Density')
tikzDevice::tikz(file = "figures/realvsempirical2.tex", width = 4, height = 2)
p
dev.off()
################################################################################
# log--normal stock return return
################################################################################
delta <- s0 ^2 * exp(2 * a * t) * (exp(sigma ^2 * t) - 1)
mu <- log(50) + (a - sigma ^2 / 2) * t
p <- ggplot(data.frame(stock = stock)) +
stat_density(aes(x = stock),
geom = "area",
alpha = 0.5,
fill = "steelblue") +
ggplot2::stat_function(fun = dlnorm,
color = "black",
args = list(meanlog = mu,
sdlog = sigma)) +
theme(legend.position = 'none',
axis.title = element_text(size = rel(0.8)),
axis.text = element_text(size = rel(0.7)),
plot.title = element_text(size = rel(0.8))) +
labs( x = 'Daily Stock Return',
y = 'Density')
tikzDevice::tikz(file = "figures/stock_log_density.tex", width = 6, height = 3)
p
dev.off()
|
\name{visualcenter}
\alias{visualcenter}
\title{visualcenter}
\description{Point in the middle of the min and max for each group.}
\usage{visualcenter(d, ...)}
\arguments{
\item{d}{
}
\item{\dots}{
}
}
\author{Toby Dylan Hocking}
| /man/visualcenter.Rd | no_license | tdhock/directlabels | R | false | false | 242 | rd | \name{visualcenter}
\alias{visualcenter}
\title{visualcenter}
\description{Point in the middle of the min and max for each group.}
\usage{visualcenter(d, ...)}
\arguments{
\item{d}{
}
\item{\dots}{
}
}
\author{Toby Dylan Hocking}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/badge_opencpu.R
\name{badge_opencpu}
\alias{badge_opencpu}
\title{badge_opencpu}
\usage{
badge_opencpu(
url,
logo_path = "https://avatars2.githubusercontent.com/u/28672890?s=200&v=4",
size = 24
)
}
\arguments{
\item{url}{url to repository on Gitlab}
\item{logo_path}{path to OpenCpu logo (default:
"https://avatars2.githubusercontent.com/u/28672890?s=200&v=4")}
\item{size}{size of logo in pixels (default: 24)}
}
\value{
OpenCpu logo in html with path to R package on OpenCpu
}
\description{
badge_opencpu
}
| /man/badge_opencpu.Rd | permissive | KWB-R/kwb.pkgstatus | R | false | true | 596 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/badge_opencpu.R
\name{badge_opencpu}
\alias{badge_opencpu}
\title{badge_opencpu}
\usage{
badge_opencpu(
url,
logo_path = "https://avatars2.githubusercontent.com/u/28672890?s=200&v=4",
size = 24
)
}
\arguments{
\item{url}{url to repository on Gitlab}
\item{logo_path}{path to OpenCpu logo (default:
"https://avatars2.githubusercontent.com/u/28672890?s=200&v=4")}
\item{size}{size of logo in pixels (default: 24)}
}
\value{
OpenCpu logo in html with path to R package on OpenCpu
}
\description{
badge_opencpu
}
|
# Map of mite sampling sites, plotted over a Stamen terrain tile map.
library(ggmap)
library(ggplot2)

# Bounding box covering Germany and surroundings (lat 45-57 N, lon 3-17 E),
# padded by 5% via make_bbox().
lat <- c(45, 57)
long <- c(3, 17)
bbox <- make_bbox(long, lat, f = 0.05)
b <- get_stamenmap(bbox, maptype = "terrain-background", source = "stamen", zoom = 9)
ggmap(b)

# Sampling coordinates; expects columns lon, lat and location.
df <- read.csv("~/Dropbox/AA_projects/mites_ASD/sampling.csv")

# One point per sampling site, coloured by location.
ggmap(b) +
  geom_point(aes(x = lon, y = lat, colour = location),
             data = df, size = 9, alpha = .8) +
  labs(x = "Longitude", y = "Latitude", colour = "sampling location") +
  scale_colour_manual(values = c("Göttingen forest" = "#AAA9AD",
                                 "Hainich forest" = "#EE4000",
                                 "Kranichstein forest" = "#4F94CD",
                                 "Schwäbische Alb" = "#73880A")) +
  theme(axis.title = element_text(family = "Arial", color = "#666666", face = "plain", size = 16),
        axis.text.x = element_text(family = "Arial", color = "#666666", face = "plain", size = 14),
        axis.text.y = element_text(family = "Arial", color = "#666666", face = "plain", size = 14)) +
  theme(panel.border = element_rect(linetype = "solid", colour = "grey", fill = NA),
        panel.grid.major = element_line(color = "grey", linetype = "dotted"),
        panel.grid.minor = element_line(colour = "grey", linetype = "dotted"),
        panel.background = element_blank(),
        axis.line = element_line(colour = "grey40")) +
  theme(legend.text = element_text(family = "Arial", color = "#666666", face = "plain", size = 12),
        legend.title = element_text(family = "Arial", color = "#666666", face = "plain", size = 14))

# NOTE(review): a half-finished duplicate of this plot was removed here: its
# hex colours were missing the leading '#', a comment was spliced into the
# labs() call, and a stray '+' turned scale_colour_manual() into an argument
# of labs() -- it could not have produced a valid plot.
| /haplotype_divergence/maps_GPS.R | no_license | AsexGenomeEvol/HD_Oppiella | R | false | false | 2,567 | r | #map of mite sampling
#install.packages("OpenStreetMap")
library(ggmap)
library(ggplot2)
?get_stamenmap
#lat <- c(47,55)
#long <- c(5,15)
lat <- c(45,57)
long <- c(3,17)
bbox <- make_bbox(long,lat,f=0.05)
#bbox <- c(left = 5, bottom = 47, right = 15, top = 55)
#a <- get_stamenmap(bbox,maptype="toner-hybrid",source="stamen", zoom=8)
#b <- get_stamenmap(bbox,maptype="toner-hybrid",source="stamen")
b <- get_stamenmap(bbox,maptype="terrain-background",source="stamen", zoom=9)
ggmap(b)
df <- read.csv("~/Dropbox/AA_projects/mites_ASD/sampling.csv")
ggmap(b)+
geom_point(aes(x = lon, y = lat, colour=location),
data = df, size=9, alpha = .8) +
labs(x = "Longitude", y = "Latitude", colour="sampling location") +
scale_colour_manual(values=c("Göttingen forest"="#AAA9AD", "Hainich forest"="#EE4000", "Kranichstein forest"="#4F94CD", "Schwäbische Alb"="#73880A")) +
theme(axis.title = element_text(family = "Arial", color="#666666", face="plain", size=16), axis.text.x = element_text(family = "Arial", color="#666666", face="plain", size=14), axis.text.y = element_text(family = "Arial", color="#666666", face="plain", size=14)) +
theme(panel.border = element_rect(linetype = "solid", colour = "grey", fill = NA), panel.grid.major = element_line(color = "grey", linetype = "dotted"), panel.grid.minor = element_line(colour = "grey", linetype = "dotted"), panel.background = element_blank(), axis.line = element_line(colour = "grey40")) +
theme(legend.text= element_text(family = "Arial", color="#666666", face="plain", size=12), legend.title= element_text(family = "Arial", color="#666666", face="plain", size=14))
ggmap(b) + geom_point(data = df, aes(lon,lat, color=location),size=10,alpha=0.7) +
labs(x = "Longitude", y = "Latitude", +
#color = "Sampling locations") +
scale_colour_manual(values=c("Göttingen forest"="AAA9AD", "Hainich forest"="EE4000", "Kranichstein forest"="4F94CD", "Schwäbische Alb"="73880A"))) +
theme(axis.title = element_text(family = "Arial", color="#666666", face="plain", size=18), axis.text.x = element_text(family = "Arial", color="#666666", face="plain", size=14), axis.text.y = element_text(family = "Arial", color="#666666", face="plain", size=14)) +
theme(panel.border = element_rect(linetype = "solid", colour = "grey", fill = NA), panel.grid.major = element_line(color = "grey", linetype = "dotted"), panel.grid.minor = element_line(colour = "grey", linetype = "dotted"), panel.background = element_blank(), axis.line = element_line(colour = "grey40"))
|
#' @useDynLib trophicR, .registration = TRUE
#' @import Rcpp
# Package load hook: register the Rcpp module of every pre-compiled Stan
# model listed in `stanmodels` (module names follow the rstantools
# "stan_fit4<name>_mod" convention).
.onLoad <- function(libname, pkgname) {
  for (model_name in names(stanmodels)) {
    loadModule(paste0("stan_fit4", model_name, "_mod"), what = TRUE)
  }
}
| /R/zzz.R | no_license | virgile-baudrot/trophicR | R | false | false | 213 | r | #' @useDynLib trophicR, .registration = TRUE
#' @import Rcpp
.onLoad <- function(libname, pkgname) {
modules <- paste0("stan_fit4", names(stanmodels), "_mod")
for (m in modules) loadModule(m, what = TRUE)
}
|
#' Similar to attach, but overwrites global
#'
#' Assigns every named element of `ll` into the global environment,
#' overwriting any existing binding of the same name. An empty list is a
#' no-op.
#'
#' @param ll list with named values
#' @export
#' @examples
#' zattach(list(a = 5))
#' a
zattach <- function(ll) {
  # seq_along() is safe for empty lists; the original 1:length(ll) yields
  # c(1, 0) when ll is empty and then errors on ll[[1]].
  for (i in seq_along(ll)) {
    assign(names(ll)[i], ll[[i]], envir=globalenv())
  }
}
#' Similar to do.call, but allows extra arguments
#'
#' Calls `f` with the arguments in `ll`, after merging in any extra named
#' arguments supplied via `...`; entries in `...` override entries of the
#' same name in `ll` (via `modifyList`).
#'
#' @param f function to be evaluated
#' @param ll list containing arguments
#' @param ... extra (named) arguments merged into `ll` before the call
#' @export
#' @examples
#' do.call2(cbind, list(a = rep(0, 3), b = rep(1, 3), c = rep(2, 3)), a = c(1, 2, 3), d = c(4, 5, 4))
do.call2 <- function(f, ll, ...) {
  # Capture the extra arguments UNevaluated: alist() keeps them as
  # expressions, so they are only evaluated when do.call builds the call.
  # NOTE(review): this mixes evaluated entries of `ll` with unevaluated
  # `...` expressions -- verify that late evaluation is intended.
  dots <- eval(substitute(alist(...)))
  ll <- modifyList(ll, dots)
  do.call(f, ll)
}
#' Converts list(list(fields)) into list(fields(list))
#'
#' Regroups a list of uniformly-named lists by field: the result has one
#' entry per field name (taken from the first element), each holding that
#' field's values across all input elements, in order.
#'
#' @param dots list of lists
#' @export
#' @examples
#' listcomb(list(list(a = 5, b = 3), list(a = 2, b = 4)))
listcomb <- function(dots) {
  field_names <- names(dots[[1]])
  regrouped <- as.list(field_names)
  names(regrouped) <- field_names
  for (field in field_names) {
    regrouped[[field]] <- lapply(seq_along(dots),
                                 function(idx) dots[[idx]][[field]])
  }
  regrouped
}
#' Calls mclapply and combines the end result using listcomb
#'
#' Applies `f` to every element of `x` (serially when mc.cores is 0,
#' in parallel via mclapply otherwise) and regroups the per-element
#' result lists by field name with listcomb().
#'
#' @param x List of arguments
#' @param f Single-argument function
#' @param mc.cores Number of cores: 0 means use lapply
#' @import parallel
#' @export
#' @examples
#' lclapply(1:10, function(i) list(a = i, b = i^2), mc.cores = 1)
lclapply <- function(x, f, mc.cores = 0) {
  raw_results <- if (mc.cores == 0) {
    lapply(x, f)
  } else {
    mclapply(x, f, mc.cores = mc.cores)
  }
  listcomb(raw_results)
}
#' Either uses lapply or mclapply
#'
#' Thin dispatcher: applies `f` over `x` serially when mc.cores is 0,
#' otherwise delegates to parallel::mclapply.
#'
#' @param x List of arguments
#' @param f Single-argument function
#' @param mc.cores Number of cores: 0 means use lapply
#' @import parallel
#' @export
#' @examples
#' mclapply0(1:10, function(i) list(a = i, b = i^2), mc.cores = 1)
mclapply0 <- function(x, f, mc.cores = 0) {
  if (mc.cores != 0) {
    return(mclapply(x, f, mc.cores = mc.cores))
  }
  lapply(x, f)
}
| /lineId/R/utils.R | no_license | snarles/fmri | R | false | false | 1,908 | r |
#' Similar to attach, but overwrites global
#'
#' @param ll list with named values
#' @export
#' @examples
#' zattach(list(a = 5))
#' a
zattach <- function(ll) {
for (i in 1:length(ll)) {
assign(names(ll)[i], ll[[i]], envir=globalenv())
}
}
#' Similar to do.call, but allows extra arguments
#'
#' @param f function to be evaluated
#' @param ll list containing arguments
#' @export
#' @examples
#' do.call2(cbind, list(a = rep(0, 3), b = rep(1, 3), c = rep(2, 3)), a = c(1, 2, 3), d = c(4, 5, 4))
do.call2 <- function(f, ll, ...) {
dots <- eval(substitute(alist(...)))
ll <- modifyList(ll, dots)
do.call(f, ll)
}
#' Converts list(list(fields)) into list(fields(list))
#'
#' @param dots list of lists
#' @export
#' @examples
#' listcomb(list(list(a = 5, b = 3), list(a = 2, b = 4)))
listcomb <- function(dots) {
nms <- names(dots[[1]])
ans <- as.list(nms)
names(ans) <- nms
for (nm in nms) {
ans[[nm]] <- lapply(1:length(dots), function(i) dots[[i]][[nm]])
}
ans
}
#' Calls mclapply and combines the end result using listcomb
#'
#' @param x List of arguments
#' @param f Single-argument function
#' @param mc.cores Number of cores: 0 means use lapply
#' @import parallel
#' @export
#' @examples
#' lclapply(1:10, function(i) list(a = i, b = i^2), mc.cores = 1)
lclapply <- function(x, f, mc.cores = 0) {
if (mc.cores == 0) {
return(listcomb(lapply(x, f)))
} else {
return(listcomb(mclapply(x, f, mc.cores = mc.cores)))
}
}
#' Either uses lapply or mclapply
#'
#' @param x List of arguments
#' @param f Single-argument function
#' @param mc.cores Number of cores: 0 means use lapply
#' @import parallel
#' @export
#' @examples
#' lclapply(1:10, function(i) list(a = i, b = i^2), mc.cores = 1)
mclapply0 <- function(x, f, mc.cores = 0) {
if (mc.cores == 0) {
return(lapply(x, f))
} else {
return(mclapply(x, f, mc.cores = mc.cores))
}
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 45832
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 45832
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc05-nonuniform-depth-49.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 17401
c no.of clauses 45832
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 45832
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc05-nonuniform-depth-49.qdimacs 17401 45832 E1 [] 0 100 17153 45832 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/trafficlight-controller/tlc05-nonuniform-depth-49/tlc05-nonuniform-depth-49.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 695 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 45832
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 45832
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc05-nonuniform-depth-49.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 17401
c no.of clauses 45832
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 45832
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc05-nonuniform-depth-49.qdimacs 17401 45832 E1 [] 0 100 17153 45832 NONE
|
#####################################################
### Load Data source #######
#####################################################
setwd("C:/Users/fanir/Desktop/Simulation_juin2018/Data")
#####################################################
### Clean the repertoir #######
#####################################################
rm(list=ls())
gc()
library(compiler)
enableJIT(1)
enableJIT(3)
library("fBasics")
#library("pracma")
library("numDeriv")
library("nlme")
library("Matrix")
#####################################################
### Load both data.contract and data.ret #######
#####################################################
load("DataPrice20092010.Rdata")
#####################################################
### Data set #######
#####################################################
#Data.N=Data.N2
Data.N=Data.N2[-c(506,1462,1638,1645),]
#####################################################
### Source function to use #######
#####################################################
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/NGARCH_returns_loglike.R")
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/Log_Mixte_VIX_Ret.r")
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/Loglik_VIX_NGARCH.r")
#####################################################
### Parameters of the model #######
#####################################################
### Initial parameter ####
## a0=para_h[1]; b1=para_h[2]; a1=para_h[3]; gama= para_h[4]; lambda= para_h[5]; a=para_h[6]; b=para_h[7] ; c=para_h[8]; d=para_h[9] ; ro=para_h[10]
para_h<-c(2.062e-06 , 0.9339 ,0.03848 , 0.6348, 0.152 , 1.02 , 0.05735, 0.1578 , 1.26842, 0.95417) ## RMSE2$rmse : RMSE3$rmse : 0.04541123
#####################################################
### Volatility #######
#####################################################
ts.vol_P= shape_vol_P(para_h, Data.returns)
ts.plot(ts.vol_P, col = "steelblue", main = "IG Garch Model",xlab="2009",ylab="Volatility")
grid()
#####################################################
### LOg values #######
#####################################################
start.time <- Sys.time()
ILK= NGARCH_likelihood_MixViX(para_h, Data.returns)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
ILK
#####################################################
### Optimization of the model #######
#####################################################
start.time <- Sys.time()
Sol=optim(para_h,NGARCH_likelihood_MixViX,Data.returns=Data.returns, method="Nelder-Mead",control = list(maxit = 10000))
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
Sol
para_h1<-Sol$par
para_h<-Sol$par
#####################################################
### Part 3 RMSE Simulation #######
#####################################################
### Source function to use #######
#####################################################
##source("C:/Users/e0g411k03dt/Desktop/Estimation Paper 2 Mars 2016/Simulation VIX HN/VIX Heston N/Simulation MC VIX HN.r")
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/Simulation_MC_Pricer_return_NGARCH.r")
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/Function_Pricer_return_NGARCH.r")
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/RMSES_return_NGARCH.r")
N= 8^10
#Pricer(N,para_h1,Data.N)$P
############################################################
#### RMSE ##
############################################################
start.time <- Sys.time()
RMSE2=RMSEsim(para_h1,Data.N)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
RMSE2$rmse
RMSE2$P
#####################################################
### Load both data.contract and data.ret #######
#####################################################
load("DataPrice20112012.Rdata")
Data.N=Data.N2
############################################################
#### RMSE ##
############################################################
start.time <- Sys.time()
RMSE2010=RMSEsim(para_h1,Data.N)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
RMSE2010$rmse
############################################################
#### Compare VIX ##
############################################################
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/Comparing_VIX_oti_ret_Ess.R")
start.time <- Sys.time()
C_VIX= Compa_vix(para_h1,Data.returns)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
C_VIX
#####################################################
### Load both data.contract and data.ret #######
#####################################################
load("DataPrice20112012.Rdata")
############################################################
#### out sample RMSE ##
############################################################
Data.N=Data.N2
start.time <- Sys.time()
RMSE2=RMSE(para_h1,Data.ret,Data.N)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
RMSE2$rmse
| /estimationJob/NGARCH/N_Ess_ret_VIX_NonCentre/Methode_Ret_VIX.R | no_license | Fanirisoa/dynamic_pricing | R | false | false | 5,809 | r | #####################################################
### Load Data source #######
#####################################################
setwd("C:/Users/fanir/Desktop/Simulation_juin2018/Data")
#####################################################
### Clean the repertoir #######
#####################################################
rm(list=ls())
gc()
library(compiler)
enableJIT(1)
enableJIT(3)
library("fBasics")
#library("pracma")
library("numDeriv")
library("nlme")
library("Matrix")
#####################################################
### Load both data.contract and data.ret #######
#####################################################
load("DataPrice20092010.Rdata")
#####################################################
### Data set #######
#####################################################
#Data.N=Data.N2
Data.N=Data.N2[-c(506,1462,1638,1645),]
#####################################################
### Source function to use #######
#####################################################
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/NGARCH_returns_loglike.R")
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/Log_Mixte_VIX_Ret.r")
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/Loglik_VIX_NGARCH.r")
#####################################################
### Parameters of the model #######
#####################################################
### Initial parameter ####
## a0=para_h[1]; b1=para_h[2]; a1=para_h[3]; gama= para_h[4]; lambda= para_h[5]; a=para_h[6]; b=para_h[7] ; c=para_h[8]; d=para_h[9] ; ro=para_h[10]
para_h<-c(2.062e-06 , 0.9339 ,0.03848 , 0.6348, 0.152 , 1.02 , 0.05735, 0.1578 , 1.26842, 0.95417) ## RMSE2$rmse : RMSE3$rmse : 0.04541123
#####################################################
### Volatility #######
#####################################################
ts.vol_P= shape_vol_P(para_h, Data.returns)
ts.plot(ts.vol_P, col = "steelblue", main = "IG Garch Model",xlab="2009",ylab="Volatility")
grid()
#####################################################
### LOg values #######
#####################################################
start.time <- Sys.time()
ILK= NGARCH_likelihood_MixViX(para_h, Data.returns)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
ILK
#####################################################
### Optimization of the model #######
#####################################################
start.time <- Sys.time()
Sol=optim(para_h,NGARCH_likelihood_MixViX,Data.returns=Data.returns, method="Nelder-Mead",control = list(maxit = 10000))
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
Sol
para_h1<-Sol$par
para_h<-Sol$par
#####################################################
### Part 3 RMSE Simulation #######
#####################################################
### Source function to use #######
#####################################################
##source("C:/Users/e0g411k03dt/Desktop/Estimation Paper 2 Mars 2016/Simulation VIX HN/VIX Heston N/Simulation MC VIX HN.r")
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/Simulation_MC_Pricer_return_NGARCH.r")
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/Function_Pricer_return_NGARCH.r")
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/RMSES_return_NGARCH.r")
N= 8^10
#Pricer(N,para_h1,Data.N)$P
############################################################
#### RMSE ##
############################################################
start.time <- Sys.time()
RMSE2=RMSEsim(para_h1,Data.N)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
RMSE2$rmse
RMSE2$P
#####################################################
### Load both data.contract and data.ret #######
#####################################################
load("DataPrice20112012.Rdata")
Data.N=Data.N2
############################################################
#### RMSE ##
############################################################
start.time <- Sys.time()
RMSE2010=RMSEsim(para_h1,Data.N)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
RMSE2010$rmse
############################################################
#### Compare VIX ##
############################################################
source("C:/Users/fanir/Desktop/Simulation_juin2018/NGARCH/N Esscher returns-VIX_NonCentre/Comparing_VIX_oti_ret_Ess.R")
start.time <- Sys.time()
C_VIX= Compa_vix(para_h1,Data.returns)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
C_VIX
#####################################################
### Load both data.contract and data.ret #######
#####################################################
load("DataPrice20112012.Rdata")
############################################################
#### out sample RMSE ##
############################################################
Data.N=Data.N2
start.time <- Sys.time()
RMSE2=RMSE(para_h1,Data.ret,Data.N)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
RMSE2$rmse
|
# Persist a question submission under `label` in the tutorial's state store.
# `question` is the question definition and `answer` the user's response;
# api_version = 1 marks the current storage format (see update_object()).
save_question_submission <- function(session, label, question, answer) {
  save_object(
    session = session,
    object_id = label,
    tutorial_object("question_submission", list(
      api_version = 1,
      question = question,
      answer = answer
    ))
  )
}
# Record that the question `label` was reset. A reset record replaces any
# prior answer; submissions_from_state_objects() filters these out via the
# data$reset flag.
save_reset_question_submission <- function(session, label, question) {
  save_object(
    session = session,
    object_id = label,
    tutorial_object("question_submission", list(
      question = question,
      reset = TRUE
    ))
  )
}
# Persist an exercise submission (code, rendered output, checked flag, and
# any grading feedback) under `label` in the tutorial's state store.
save_exercise_submission <- function(session, label, code, output, error_message, checked, feedback) {
  # for client storage we only forward error output. this is because we want
  # to replay errors back into the client with no execution (in case they were
  # timeout errors as a result of misbehaving code). for other outputs the client
  # will just tickle the inputs to force re-execution of the outputs.
  storage <- tutorial_storage(session)
  if (identical(storage$type, "client")) {
    if (!is.null(error_message) && !identical(error_message, ""))
      output <- error_message_html(error_message)
    else
      output <- NULL
  }
  # save object
  save_object(
    session = session,
    object_id = label,
    tutorial_object("exercise_submission", list(
      code = code,
      output = output,
      checked = checked,
      feedback = feedback
    ))
  )
}
# Record that the user skipped section `sectionId`. The object id is
# namespaced with "section_skipped" so it cannot collide with
# exercise/question labels; ns_unwrap() recovers the raw section id.
save_section_skipped <- function(session, sectionId) {
  save_object(
    session = session,
    object_id = ns_wrap("section_skipped", sectionId),
    tutorial_object("section_skipped", list())
  )
}
# Record playback progress for the video at `video_url` (used as the object
# id). `time` is the current position and `total_time` the video length --
# presumably seconds, as reported by the client player; confirm there.
save_video_progress <- function(session, video_url, time, total_time) {
  save_object(
    session = session,
    object_id = video_url,
    tutorial_object("video_progress", list(
      time = time,
      total_time = total_time
    ))
  )
}
client_state_object_id <- "tutorial-client-state-825E9CBB-FF7A-4C2C-A201-A075AB758F34"
# Persist opaque client UI state (whatever the client sends as `data`)
# under the fixed client_state_object_id; retrieved by get_client_state().
save_client_state <- function(session, data) {
  save_object(
    session = session,
    object_id = client_state_object_id,
    tutorial_object("client_state", data)
  )
}
# Return the previously saved client UI state, or an empty list when no
# state has been stored yet for this tutorial/user.
get_client_state <- function(session) {
  state_object <- get_object(session, client_state_object_id)
  if (is.null(state_object)) {
    return(list())
  }
  state_object$data
}
# Retrieve the stored submission object for an exercise/question `label`
# (NULL if nothing has been saved under that label).
get_exercise_submission <- function(session, label) {
  get_object(session = session, object_id = label)
}
# Fetch every stored state object for the current tutorial/user. When
# `exercise_output` is FALSE, exercise submission output is stripped (the
# client doesn't need it and it's expensive to transmit).
get_all_state_objects <- function(session, exercise_output = TRUE) {
  state_objects <- get_objects(session)
  if (exercise_output) {
    return(state_objects)
  }
  lapply(state_objects, function(state_object) {
    if (state_object$type == "exercise_submission") {
      # assign list(NULL) via [ so the "output" element is kept but NULL'd
      state_object["output"] <- list(NULL)
    }
    state_object
  })
}
# Keep only the state objects whose $type is one of `types`.
filter_state_objects <- function(state_objects, types) {
  keep <- vapply(
    state_objects,
    function(state_object) state_object$type %in% types,
    logical(1)
  )
  state_objects[keep]
}
# Return the question and exercise submissions from `state_objects`,
# excluding question records that only mark a reset (data$reset == TRUE).
submissions_from_state_objects <- function(state_objects) {
  filtered_submissions <- filter_state_objects(state_objects, c("question_submission", "exercise_submission"))
  Filter(x = filtered_submissions, function(object) {
    # only return answered question, not reset questions
    if (object$type == "question_submission") {
      !isTRUE(object$data$reset)
    } else {
      TRUE
    }
  })
}
# All video progress records from `state_objects`.
video_progress_from_state_objects <- function(state_objects) {
  filter_state_objects(state_objects, c("video_progress"))
}
# All section-skipped records from `state_objects`.
section_skipped_progress_from_state_objects <- function(state_objects) {
  filter_state_objects(state_objects, c("section_skipped"))
}
# Convert stored state objects into the list of progress events replayed to
# the client on initialize: submissions, skipped sections, and video
# progress. Each element has the shape list(event = <type>, data = <payload>).
progress_events_from_state_objects <- function(state_objects) {
  # first submissions
  submissions <- submissions_from_state_objects(state_objects)
  progress_events <- lapply(submissions, function(submission) {
    data <- list(
      label = submission$id
    )
    if (submission$type == "question_submission") {
      data$answer <- submission$data$answer
    }
    else if (submission$type == "exercise_submission") {
      # an exercise submission with no feedback (i.e. unchecked) is treated
      # as correct for progress purposes
      if (!is.null(submission$data$feedback))
        correct <- submission$data$feedback$correct
      else
        correct <- TRUE
      data$correct <- correct
    }
    list(event = submission$type,
         data = data)
  })
  # now sections skipped (strip the "section_skipped" id namespace)
  section_skipped_progress <- section_skipped_progress_from_state_objects(state_objects)
  section_skipped_progress_events <- lapply(section_skipped_progress, function(skipped) {
    list(event = "section_skipped",
         data = list(
           sectionId = ns_unwrap("section_skipped", skipped$id)
         ))
  })
  progress_events <- append(progress_events, section_skipped_progress_events)
  # now video_progress (object id is the video url)
  video_progress <- video_progress_from_state_objects(state_objects)
  video_progress_events <- lapply(video_progress, function(progress) {
    list(event = "video_progress",
         data = list(
           video_url = progress$id,
           time = progress$data$time,
           total_time = progress$data$total_time
         ))
  })
  progress_events <- append(progress_events, video_progress_events)
  # return progress events
  progress_events
}
# Save `data` under `object_id`, keyed by the tutorial id/version and user id
# read from the session request state. The object id is also stored inside
# the record itself so get_objects() results are self-describing.
save_object <- function(session, object_id, data) {
  tutorial_id <- read_request(session, "tutorial.tutorial_id")
  tutorial_version <- read_request(session, "tutorial.tutorial_version")
  user_id <- read_request(session, "tutorial.user_id")
  data$id <- object_id
  tutorial_storage(session)$save_object(tutorial_id, tutorial_version, user_id, object_id, data)
}
# Migrate a stored state object to the current storage format. Records
# written before v0.10.0 (no data$api_version) kept question responses
# under "answers"; rename that field to "answer". NULL passes through.
update_object <- function(object) {
  if (is.null(object)) {
    return(NULL)
  }
  needs_upgrade <-
    identical(object$type, "question_submission") &&
    is.null(object$data$api_version)
  if (needs_upgrade) {
    # pre-v0.10.0 record: rename answers -> answer
    object$data$answer <- object$data$answers
    object$data$answers <- NULL
  }
  object
}
# Read a single object by id for the current tutorial/version/user, then
# migrate it to the current storage format (NULL if not found).
get_object <- function(session, object_id) {
  tutorial_id <- read_request(session, "tutorial.tutorial_id")
  tutorial_version <- read_request(session, "tutorial.tutorial_version")
  user_id <- read_request(session, "tutorial.user_id")
  object <- tutorial_storage(session)$get_object(tutorial_id, tutorial_version, user_id, object_id)
  update_object(object)
}
# Read all stored objects for the current tutorial/version/user, migrating
# each record to the current storage format.
get_objects <- function(session) {
  tutorial_id <- read_request(session, "tutorial.tutorial_id")
  tutorial_version <- read_request(session, "tutorial.tutorial_version")
  user_id <- read_request(session, "tutorial.user_id")
  objects <- tutorial_storage(session)$get_objects(tutorial_id, tutorial_version, user_id)
  lapply(objects, update_object)
}
# Delete every stored object for the current tutorial/version/user
# (used when the tutorial is restarted from scratch).
remove_all_objects <- function(session) {
  tutorial_id <- read_request(session, "tutorial.tutorial_id")
  tutorial_version <- read_request(session, "tutorial.tutorial_version")
  user_id <- read_request(session, "tutorial.user_id")
  tutorial_storage(session)$remove_all_objects(tutorial_id, tutorial_version, user_id)
}
# Prime the server-side (client storage) in-memory store with objects that
# the browser restored from its local database during session initialize.
initialize_objects_from_client <- function(session, objects) {
  tutorial_id <- read_request(session, "tutorial.tutorial_id")
  tutorial_version <- read_request(session, "tutorial.tutorial_version")
  user_id <- read_request(session, "tutorial.user_id")
  client_storage(session)$initialize_objects_from_client(tutorial_id,
                                                         tutorial_version,
                                                         user_id,
                                                         objects)
}
# Package a tutorial state record as a type-tagged list (type + data).
tutorial_object <- function(type, data) {
  list(type = type, data = data)
}
# Prefix `id` with the namespace `ns` (inverse of ns_unwrap).
ns_wrap <- function(ns, id) {
  paste(ns, id, sep = "")
}
# Strip the leading namespace `ns` from `id` (inverse of ns_wrap).
ns_unwrap <- function(ns, id) {
  substr(id, nchar(ns) + 1L, nchar(id))
}
# Resolve the currently active storage handler for this session. The
# "tutorial.storage" option may be "auto" (default; local filesystem on
# localhost, client/browser storage otherwise, none under shinytest),
# "local", "client", "none", or a custom handler list implementing
# save_object/get_object/get_objects.
tutorial_storage <- function(session) {
  # local storage implementation
  local_storage <- filesystem_storage(
    file.path(rappdirs::user_data_dir(), "R", "learnr", "tutorial", "storage")
  )
  # function to determine "auto" storage
  auto_storage <- function() {
    if (getOption("shiny.testmode", default = FALSE)) {
      # With shinytest, we don't want to restore state; start with a clean slate
      # each time.
      return(no_storage())
    }
    location <- read_request(session, "tutorial.http_location")
    if (is_localhost(location))
      local_storage
    else
      client_storage(session)
  }
  # examine the option
  storage <- getOption("tutorial.storage", default = "auto")
  # resolve NULL to "none"
  if (is.null(storage))
    storage <- "none"
  # if it's a character vector then resolve it
  if (is.character(storage)) {
    storage <- switch(storage,
      auto = auto_storage(),
      local = local_storage,
      client = client_storage(session),
      none = no_storage()
    )
  }
  # verify that storage is a list
  if (!is.list(storage))
    stop("tutorial.storage must be a 'auto', 'local', 'client', 'none' or a ",
         "list of storage functions")
  # validate storage interface
  if (is.null(storage$save_object))
    stop("tutorial.storage must implement the save_object function")
  if (is.null(storage$get_object))
    stop("tutorial.storage must implement the get_object function")
  if (is.null(storage$get_objects))
    # fixed grammar: "must implements" -> "must implement"
    stop("tutorial.storage must implement the get_objects function")
  # return it
  storage
}
#' Filesystem-based storage for tutor state data
#'
#' Tutorial state storage handler that uses the filesystem as a backing store.
#' The directory will contain tutorial state data partitioned by `user_id`,
#' `tutorial_id`, and `tutorial_version` (in that order)
#'
#' @param dir Directory to store state data within
#' @param compress Should \code{.rds} files be compressed?
#'
#' @return Storage handler suitable for \code{options(tutorial.storage = ...)}
#'
#' @export
filesystem_storage <- function(dir, compress = TRUE) {

  # make an id safe to use as a single path component (and back again)
  encode_id <- function(id) {
    # scrub ".." so ids cannot escape the storage directory
    cleaned <- gsub("..", "", id, fixed = TRUE)
    utils::URLencode(cleaned, reserved = TRUE, repeated = TRUE)
  }
  decode_path <- function(path) {
    utils::URLdecode(path)
  }

  # directory holding one tutorial/version/user triple (created on demand)
  ensure_storage_dir <- function(tutorial_id, tutorial_version, user_id) {
    path <- file.path(dir,
                      encode_id(user_id),
                      encode_id(tutorial_id),
                      encode_id(tutorial_version))
    if (!utils::file_test("-d", path)) {
      dir.create(path, recursive = TRUE)
    }
    path
  }

  # full path of the .rds file backing a single object
  object_file <- function(tutorial_id, tutorial_version, user_id, object_id) {
    file.path(ensure_storage_dir(tutorial_id, tutorial_version, user_id),
              paste0(encode_id(object_id), ".rds"))
  }

  # storage interface: each object is one .rds file
  list(
    type = "local",
    save_object = function(tutorial_id, tutorial_version, user_id, object_id, data) {
      saveRDS(data,
              file = object_file(tutorial_id, tutorial_version, user_id, object_id),
              compress = compress)
    },
    get_object = function(tutorial_id, tutorial_version, user_id, object_id) {
      path <- object_file(tutorial_id, tutorial_version, user_id, object_id)
      if (file.exists(path)) readRDS(path) else NULL
    },
    get_objects = function(tutorial_id, tutorial_version, user_id) {
      objects_dir <- ensure_storage_dir(tutorial_id, tutorial_version, user_id)
      rds_files <- list.files(objects_dir, pattern = utils::glob2rx("*.rds"))
      lapply(rds_files, function(rds_file) {
        readRDS(file.path(objects_dir, rds_file))
      })
    },
    remove_all_objects = function(tutorial_id, tutorial_version, user_id) {
      unlink(ensure_storage_dir(tutorial_id, tutorial_version, user_id),
             recursive = TRUE)
    }
  )
}
# Client-side storage implementation: data is saved by broadcasting it to the
# client (browser local store). This data is subsequently restored during
# initialize and kept in a per-session in-memory table for retrieval.
client_storage <- function(session) {
  # helper to form a unique tutorial context id (note that we don't utilize the user_id
  # as there is no concept of server-side user in client_storage, user scope is 100%
  # determined by connecting user agent)
  tutorial_context_id <- function(tutorial_id, tutorial_version) {
    paste(tutorial_id, tutorial_version, sep = "-")
  }
  # get a reference to the session object cache for a given tutorial context
  object_store <- function(context_id) {
    # create session objects on demand
    session_objects <- read_request(session, "tutorial.session_objects")
    if (is.null(session_objects)) {
      session_objects <- new.env(parent = emptyenv())
      write_request(session, "tutorial.session_objects", session_objects)
    }
    # create entry for this context on demand
    if (!exists(context_id, envir = session_objects))
      assign(context_id, new.env(parent = emptyenv()), envir = session_objects)
    store <- get(context_id, envir = session_objects)
    # return reference to the store
    store
  }
  list(
    type = "client",
    save_object = function(tutorial_id, tutorial_version, user_id, object_id, data) {
      # save the object to our in-memory store
      context_id <- tutorial_context_id(tutorial_id, tutorial_version)
      store <- object_store(context_id)
      # scrub "answers" from the state stored on the client (client storage only)
      data <- scrub_correct_and_feedback(data)
      assign(object_id, data, envir = store)
      # broadcast to client (serialized + base64-encoded for the JS side)
      session$sendCustomMessage("tutorial.store_object", list(
        context = context_id,
        id = object_id,
        data = base64_enc(serialize(data, connection = NULL))
      ))
    },
    get_object = function(tutorial_id, tutorial_version, user_id, object_id) {
      context_id <- tutorial_context_id(tutorial_id, tutorial_version)
      store <- object_store(context_id)
      if (exists(object_id, envir = store))
        get(object_id, envir = store)
      else
        NULL
    },
    get_objects = function(tutorial_id, tutorial_version, user_id) {
      context_id <- tutorial_context_id(tutorial_id, tutorial_version)
      store <- object_store(context_id)
      objects <- list()
      for (object in ls(store))
        objects[[length(objects) + 1]] <- get(object, envir = store)
      objects
    },
    remove_all_objects = function(tutorial_id, tutorial_version, user_id) {
      # remove on server side (client side is handled on client)
      context_id <- tutorial_context_id(tutorial_id, tutorial_version)
      store <- object_store(context_id)
      rm(list = ls(store), envir = store)
    },
    # function called from initialize to prime object storage from the browser db
    initialize_objects_from_client = function(tutorial_id, tutorial_version, user_id, objects) {
      context_id <- tutorial_context_id(tutorial_id, tutorial_version)
      store <- object_store(context_id)
      for (object_id in names(objects)) {
        data <- unserialize(base64_dec(objects[[object_id]]))
        assign(object_id, data, envir = store)
      }
    }
  )
}
# No-op storage implementation: records nothing and restores nothing.
no_storage <- function() {
  list(
    type = "none",
    save_object = function(tutorial_id, tutorial_version, user_id, object_id, data) {
    },
    get_object = function(tutorial_id, tutorial_version, user_id, object_id) {
      NULL
    },
    get_objects = function(tutorial_id, tutorial_version, user_id) {
      list()
    },
    remove_all_objects = function(tutorial_id, tutorial_version, user_id) {
    }
  )
}
# Remove answer-revealing fields before storing state on the client:
# correctness for question submissions, feedback for exercise submissions.
# NULL input passes through unchanged.
scrub_correct_and_feedback <- function(data) {
  if (is.null(data)) {
    return(NULL)
  }
  if (identical(data$type, "question_submission")) {
    data$data$correct <- NULL
  } else if (identical(data$type, "exercise_submission")) {
    data$data$feedback <- NULL
  }
  data
}
| /R/storage.R | permissive | rstudio/learnr | R | false | false | 16,210 | r |
save_question_submission <- function(session, label, question, answer) {
save_object(
session = session,
object_id = label,
tutorial_object("question_submission", list(
api_version = 1,
question = question,
answer = answer
))
)
}
save_reset_question_submission <- function(session, label, question) {
save_object(
session = session,
object_id = label,
tutorial_object("question_submission", list(
question = question,
reset = TRUE
))
)
}
save_exercise_submission <- function(session, label, code, output, error_message, checked, feedback) {
# for client storage we only forward error output. this is because we want
# to replay errors back into the client with no execution (in case they were
# timeout errors as a result of misbehaving code). for other outputs the client
# will just tickle the inputs to force re-execution of the outputs.
storage <- tutorial_storage(session)
if (identical(storage$type, "client")) {
if (!is.null(error_message) && !identical(error_message, ""))
output <- error_message_html(error_message)
else
output <- NULL
}
# save object
save_object(
session = session,
object_id = label,
tutorial_object("exercise_submission", list(
code = code,
output = output,
checked = checked,
feedback = feedback
))
)
}
save_section_skipped <- function(session, sectionId) {
save_object(
session = session,
object_id = ns_wrap("section_skipped", sectionId),
tutorial_object("section_skipped", list())
)
}
save_video_progress <- function(session, video_url, time, total_time) {
save_object(
session = session,
object_id = video_url,
tutorial_object("video_progress", list(
time = time,
total_time = total_time
))
)
}
client_state_object_id <- "tutorial-client-state-825E9CBB-FF7A-4C2C-A201-A075AB758F34"
save_client_state <- function(session, data) {
save_object(
session = session,
object_id = client_state_object_id,
tutorial_object("client_state", data)
)
}
get_client_state <- function(session) {
object <- get_object(session, client_state_object_id)
if (!is.null(object))
object$data
else
list()
}
get_exercise_submission <- function(session, label) {
get_object(session = session, object_id = label)
}
get_all_state_objects <- function(session, exercise_output = TRUE) {
# get all of the objects
objects <- get_objects(session)
# strip output (the client doesn't need it and it's expensive to transmit)
objects <- lapply(objects, function(object) {
if (object$type == "exercise_submission") {
if (!exercise_output) {
object$data["output"] <- list(NULL)
}
}
object
})
# return objects
objects
}
filter_state_objects <- function(state_objects, types) {
Filter(x = state_objects, function(object) {
object$type %in% types
})
}
submissions_from_state_objects <- function(state_objects) {
filtered_submissions <- filter_state_objects(state_objects, c("question_submission", "exercise_submission"))
Filter(x = filtered_submissions, function(object) {
# only return answered question, not reset questions
if (object$type == "question_submission") {
!isTRUE(object$data$reset)
} else {
TRUE
}
})
}
video_progress_from_state_objects <- function(state_objects) {
filter_state_objects(state_objects, c("video_progress"))
}
section_skipped_progress_from_state_objects <- function(state_objects) {
filter_state_objects(state_objects, c("section_skipped"))
}
progress_events_from_state_objects <- function(state_objects) {
# first submissions
submissions <- submissions_from_state_objects(state_objects)
progress_events <- lapply(submissions, function(submission) {
data <- list(
label = submission$id
)
if (submission$type == "question_submission") {
data$answer <- submission$data$answer
}
else if (submission$type == "exercise_submission") {
if (!is.null(submission$data$feedback))
correct <- submission$data$feedback$correct
else
correct <- TRUE
data$correct <- correct
}
list(event = submission$type,
data = data)
})
# now sections skipped
section_skipped_progress <- section_skipped_progress_from_state_objects(state_objects)
section_skipped_progress_events <- lapply(section_skipped_progress, function(skipped) {
list(event = "section_skipped",
data = list(
sectionId = ns_unwrap("section_skipped", skipped$id)
))
})
progress_events <- append(progress_events, section_skipped_progress_events)
# now video_progress
video_progress <- video_progress_from_state_objects(state_objects)
video_progress_events <- lapply(video_progress, function(progress) {
list(event = "video_progress",
data = list(
video_url = progress$id,
time = progress$data$time,
total_time = progress$data$total_time
))
})
progress_events <- append(progress_events, video_progress_events)
# return progress events
progress_events
}
save_object <- function(session, object_id, data) {
tutorial_id <- read_request(session, "tutorial.tutorial_id")
tutorial_version <- read_request(session, "tutorial.tutorial_version")
user_id <- read_request(session, "tutorial.user_id")
data$id <- object_id
tutorial_storage(session)$save_object(tutorial_id, tutorial_version, user_id, object_id, data)
}
# Migrate a stored state object to the current storage format. Records
# written before v0.10.0 (no data$api_version) kept question responses
# under "answers"; rename that field to "answer". NULL passes through.
update_object <- function(object) {
  if (is.null(object)) {
    return(NULL)
  }
  needs_upgrade <-
    identical(object$type, "question_submission") &&
    is.null(object$data$api_version)
  if (needs_upgrade) {
    # pre-v0.10.0 record: rename answers -> answer
    object$data$answer <- object$data$answers
    object$data$answers <- NULL
  }
  object
}
get_object <- function(session, object_id) {
tutorial_id <- read_request(session, "tutorial.tutorial_id")
tutorial_version <- read_request(session, "tutorial.tutorial_version")
user_id <- read_request(session, "tutorial.user_id")
object <- tutorial_storage(session)$get_object(tutorial_id, tutorial_version, user_id, object_id)
update_object(object)
}
get_objects <- function(session) {
tutorial_id <- read_request(session, "tutorial.tutorial_id")
tutorial_version <- read_request(session, "tutorial.tutorial_version")
user_id <- read_request(session, "tutorial.user_id")
objects <- tutorial_storage(session)$get_objects(tutorial_id, tutorial_version, user_id)
lapply(objects, update_object)
}
remove_all_objects <- function(session) {
tutorial_id <- read_request(session, "tutorial.tutorial_id")
tutorial_version <- read_request(session, "tutorial.tutorial_version")
user_id <- read_request(session, "tutorial.user_id")
tutorial_storage(session)$remove_all_objects(tutorial_id, tutorial_version, user_id)
}
initialize_objects_from_client <- function(session, objects) {
tutorial_id <- read_request(session, "tutorial.tutorial_id")
tutorial_version <- read_request(session, "tutorial.tutorial_version")
user_id <- read_request(session, "tutorial.user_id")
client_storage(session)$initialize_objects_from_client(tutorial_id,
tutorial_version,
user_id,
objects)
}
# helper to form a tutor object (type + data)
# Package a tutorial state record as a type-tagged list (type + data).
tutorial_object <- function(type, data) {
  list(type = type, data = data)
}
# Prefix `id` with the namespace `ns` (inverse of ns_unwrap).
ns_wrap <- function(ns, id) {
  paste(ns, id, sep = "")
}
# Strip the leading namespace `ns` from `id` (inverse of ns_wrap).
ns_unwrap <- function(ns, id) {
  substr(id, nchar(ns) + 1L, nchar(id))
}
# Resolve the storage handler configured via the "tutorial.storage" option.
# Accepts "auto" (local filesystem storage when serving on localhost, client
# storage otherwise; no storage under shinytest), "local", "client", "none",
# or a list directly implementing the storage interface. The returned list
# must provide save_object / get_object / get_objects functions.
tutorial_storage <- function(session) {

  # local storage implementation
  local_storage <- filesystem_storage(
    file.path(rappdirs::user_data_dir(), "R", "learnr", "tutorial", "storage")
  )

  # function to determine "auto" storage
  auto_storage <- function() {
    if (getOption("shiny.testmode", default = FALSE)) {
      # With shinytest, we don't want to restore state; start with a clean slate
      # each time.
      return(no_storage())
    }
    location <- read_request(session, "tutorial.http_location")
    if (is_localhost(location))
      local_storage
    else
      client_storage(session)
  }

  # examine the option
  storage <- getOption("tutorial.storage", default = "auto")

  # resolve NULL to "none"
  if (is.null(storage))
    storage <- "none"

  # if it's a character vector then resolve it
  if (is.character(storage)) {
    storage <- switch(storage,
      auto = auto_storage(),
      local = local_storage,
      client = client_storage(session),
      none = no_storage()
    )
  }

  # verify that storage is a list
  if (!is.list(storage))
    stop("tutorial.storage must be a 'auto', 'local', 'client', 'none' or a ",
         "list of storage functions")

  # validate storage interface
  if (is.null(storage$save_object))
    stop("tutorial.storage must implement the save_object function")
  if (is.null(storage$get_object))
    stop("tutorial.storage must implement the get_object function")
  if (is.null(storage$get_objects))
    # fixed grammar of this message ("must implements" -> "must implement")
    stop("tutorial.storage must implement the get_objects function")

  # return it
  storage
}
#' Filesystem-based storage for tutor state data
#'
#' Tutorial state storage handler that uses the filesystem as a backing store.
#' The directory will contain tutorial state data partitioned by `user_id`,
#' `tutorial_id`, and `tutorial_version` (in that order)
#'
#' @param dir Directory to store state data within
#' @param compress Should \code{.rds} files be compressed?
#'
#' @return Storage handler suitable for \code{options(tutorial.storage = ...)}
#'
#' @export
filesystem_storage <- function(dir, compress = TRUE) {

  # encode an id so it is safe to use as a file/directory name;
  # ".." is dropped first to prevent path traversal outside of `dir`
  encode_id <- function(id) {
    safe <- gsub("..", "", id, fixed = TRUE)
    utils::URLencode(safe, reserved = TRUE, repeated = TRUE)
  }

  # inverse of encode_id
  decode_id <- function(path) {
    utils::URLdecode(path)
  }

  # resolve (and lazily create) the directory for a tutorial/version/user triple
  resolve_dir <- function(tutorial_id, tutorial_version, user_id) {
    target <- file.path(dir,
                        encode_id(user_id),
                        encode_id(tutorial_id),
                        encode_id(tutorial_version))
    if (!utils::file_test("-d", target))
      dir.create(target, recursive = TRUE)
    target
  }

  # path of the .rds file backing a single object
  object_file <- function(tutorial_id, tutorial_version, user_id, object_id) {
    file.path(resolve_dir(tutorial_id, tutorial_version, user_id),
              paste0(encode_id(object_id), ".rds"))
  }

  list(
    type = "local",

    save_object = function(tutorial_id, tutorial_version, user_id, object_id, data) {
      saveRDS(data,
              file = object_file(tutorial_id, tutorial_version, user_id, object_id),
              compress = compress)
    },

    get_object = function(tutorial_id, tutorial_version, user_id, object_id) {
      path <- object_file(tutorial_id, tutorial_version, user_id, object_id)
      if (file.exists(path)) readRDS(path) else NULL
    },

    get_objects = function(tutorial_id, tutorial_version, user_id) {
      root <- resolve_dir(tutorial_id, tutorial_version, user_id)
      files <- list.files(root, pattern = utils::glob2rx("*.rds"))
      lapply(files, function(f) readRDS(file.path(root, f)))
    },

    remove_all_objects = function(tutorial_id, tutorial_version, user_id) {
      unlink(resolve_dir(tutorial_id, tutorial_version, user_id), recursive = TRUE)
    }
  )
}
# Client-side storage implementation. Data is saved by broadcasting it to the
# client (browser); on initialize, previously saved data is restored from the
# client and cached in a per-session in-memory table for retrieval.
client_storage <- function(session) {

  # helper to form a unique tutorial context id (note that we don't utilize the user_id
  # as there is no concept of server-side user in client_storage, user scope is 100%
  # determined by connecting user agent)
  tutorial_context_id <- function(tutorial_id, tutorial_version) {
    paste(tutorial_id, tutorial_version, sep = "-")
  }

  # get a reference to the session object cache for a given tutorial context
  # (environments are used so mutations persist across calls by reference)
  object_store <- function(context_id) {

    # create session objects on demand
    session_objects <- read_request(session, "tutorial.session_objects")
    if (is.null(session_objects)) {
      session_objects <- new.env(parent = emptyenv())
      write_request(session, "tutorial.session_objects", session_objects)
    }

    # create entry for this context on demand
    if (!exists(context_id, envir = session_objects))
      assign(context_id, new.env(parent = emptyenv()), envir = session_objects)
    store <- get(context_id, envir = session_objects)

    # return reference to the store
    store
  }

  list(
    type = "client",

    # persist an object: cache it server-side, then serialize + base64-encode
    # it and push it to the client via a custom Shiny message
    save_object = function(tutorial_id, tutorial_version, user_id, object_id, data) {

      # save the object to our in-memory store
      context_id <- tutorial_context_id(tutorial_id, tutorial_version)
      store <- object_store(context_id)

      # scrub "answers" from the state stored on the client (client storage only)
      data <- scrub_correct_and_feedback(data)

      assign(object_id, data, envir = store)

      # broadcast to client
      session$sendCustomMessage("tutorial.store_object", list(
        context = context_id,
        id = object_id,
        data = base64_enc(serialize(data, connection = NULL))
      ))
    },

    # look up a single object in the in-memory cache (NULL when absent)
    get_object = function(tutorial_id, tutorial_version, user_id, object_id) {
      context_id <- tutorial_context_id(tutorial_id, tutorial_version)
      store <- object_store(context_id)
      if (exists(object_id, envir = store))
        get(object_id, envir = store)
      else
        NULL
    },

    # all cached objects for this context, as an unnamed list
    get_objects = function(tutorial_id, tutorial_version, user_id) {
      context_id <- tutorial_context_id(tutorial_id, tutorial_version)
      store <- object_store(context_id)
      objects <- list()
      for (object in ls(store))
        objects[[length(objects) + 1]] <- get(object, envir = store)
      objects
    },

    remove_all_objects = function(tutorial_id, tutorial_version, user_id) {
      # remove on server side (client side is handled on client)
      context_id <- tutorial_context_id(tutorial_id, tutorial_version)
      store <- object_store(context_id)
      rm(list = ls(store), envir = store)
    },

    # function called from initialize to prime object storage from the browser db
    initialize_objects_from_client = function(tutorial_id, tutorial_version, user_id, objects) {
      context_id <- tutorial_context_id(tutorial_id, tutorial_version)
      store <- object_store(context_id)
      for (object_id in names(objects)) {
        data <- unserialize(base64_dec(objects[[object_id]]))
        assign(object_id, data, envir = store)
      }
    }
  )
}
# No-op storage implementation: writes are discarded and reads always come
# back empty. Used when state persistence is disabled.
no_storage <- function() {
  noop_save <- function(tutorial_id, tutorial_version, user_id, object_id, data) {}
  empty_get <- function(tutorial_id, tutorial_version, user_id, object_id) { NULL }
  empty_list <- function(tutorial_id, tutorial_version, user_id) { list() }
  noop_remove <- function(tutorial_id, tutorial_version, user_id) {}
  list(
    type = "none",
    save_object = noop_save,
    get_object = empty_get,
    get_objects = empty_list,
    remove_all_objects = noop_remove
  )
}
# Strip answer-revealing fields before state is persisted on the client:
# question submissions lose their `correct` flag and exercise submissions
# lose their `feedback`. NULL input passes through untouched.
scrub_correct_and_feedback <- function(data) {
  if (is.null(data)) {
    return(data)
  }
  is_type <- function(t) identical(data$type, t)
  if (is_type("question_submission")) {
    data$data$correct <- NULL
  }
  if (is_type("exercise_submission")) {
    data$data$feedback <- NULL
  }
  data
}
|
#!/usr/bin/Rscript
# Render one named plot to a PNG. The first command-line argument <name>
# selects the plot: the script sources "<name>plot.R" and writes "<name>.png".
a <- commandArgs(TRUE)
# Restore precomputed data and helper functions used by the plot scripts.
load('main.RData')
source('funcs.R')
source('quantheatplotfunc.R')
# Open the PNG device before sourcing so the plot script draws into it.
png(paste(a, '.png', sep=''), width=640)
source(paste(a, 'plot.R', sep=''))
| /articles/df0pred-3/plot.R | no_license | lpenz/lpenz.github.io | R | false | false | 189 | r | #!/usr/bin/Rscript
a <- commandArgs(TRUE)
load('main.RData')
source('funcs.R')
source('quantheatplotfunc.R')
png(paste(a, '.png', sep=''), width=640)
source(paste(a, 'plot.R', sep=''))
|
library(ape)
testtree <- read.tree("6252_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="6252_0_unrooted.txt") | /codeml_files/newick_trees_processed/6252_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("6252_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="6252_0_unrooted.txt") |
library(dplyr)
library(lubridate)
# reading relevant data
data <- read.table("household_power_consumption.txt", sep = ";", skip = 66637, nrows = 2880)
# clearing name row and unrelevant variables, creating numerics
data <- data %>% select(V3) %>% mutate(V3 = as.numeric(as.character(V3)))
# create histogram
hist(data$V3, col = "red", main = "Global Active Power",
xlab = "Global Active Power (killowatts)", ylab = "Frequency")
| /Plot1.R | no_license | galibmehadi/EDA | R | false | false | 436 | r | library(dplyr)
library(lubridate)
# reading relevant data
data <- read.table("household_power_consumption.txt", sep = ";", skip = 66637, nrows = 2880)
# clearing name row and unrelevant variables, creating numerics
data <- data %>% select(V3) %>% mutate(V3 = as.numeric(as.character(V3)))
# create histogram
hist(data$V3, col = "red", main = "Global Active Power",
xlab = "Global Active Power (killowatts)", ylab = "Frequency")
|
# Changing this file requires running update_embedded_sources.R to rebuild sources and jars.

# Serialize a data.frame to an Arrow IPC stream (raw vector) for transfer to
# the JVM. For Spark < 3.0 the legacy (pre-0.15) Arrow IPC format is requested
# via the ARROW_PRE_0_15_IPC_FORMAT environment variable.
# NOTE(review): `spark_version_number < "3.0"` compares as strings here --
# assumes versions are passed in a form where that ordering is correct.
arrow_write_record_batch <- function(df, spark_version_number = NULL) {
  arrow_env_vars <- list()
  if (!is.null(spark_version_number) && spark_version_number < "3.0") {
    # Spark < 3 uses an old version of Arrow, so send data in the legacy format
    arrow_env_vars$ARROW_PRE_0_15_IPC_FORMAT <- 1
  }

  withr::with_envvar(arrow_env_vars, {
    # New in arrow 0.17: takes a data.frame and returns a raw buffer with Arrow data
    if ("write_to_raw" %in% ls(envir = asNamespace("arrow"))) {
      # Fixed in 0.17: arrow doesn't hardcode a GMT timezone anymore
      # so set the local timezone to any POSIXt columns that don't have one set
      # https://github.com/sparklyr/sparklyr/issues/2439
      df[] <- lapply(df, function(x) {
        if (inherits(x, "POSIXt") && is.null(attr(x, "tzone"))) {
          attr(x, "tzone") <- Sys.timezone()
        }
        x
      })
      arrow::write_to_raw(df, format = "stream")
    } else {
      # pre-0.17 arrow API
      arrow::write_arrow(arrow::record_batch(!!!df), raw())
    }
  })
}
# Open an Arrow RecordBatchStreamReader over an IPC stream.
arrow_record_stream_reader <- function(stream) {
  arrow::RecordBatchStreamReader$create(stream)
}
# Pull the next record batch from a stream reader (NULL at end of stream).
arrow_read_record_batch <- function(reader) reader$read_next_batch()
# Convert an Arrow record batch to a data.frame.
arrow_as_tibble <- function(record) as.data.frame(record)
#' A helper function to retrieve values from \code{spark_config()}
#'
#' Resolution order is: explicit entry in \code{config}, then an R option of
#' the same name, then \code{default}. Quoted expressions are converted to
#' closures and closures are invoked so values may be computed lazily.
#'
#' @param config The configuration list from \code{spark_config()}
#' @param name The name (or vector of candidate names) of the configuration entry
#' @param default The default value to use when entry is not present
#'
#' @keywords internal
#' @export
spark_config_value <- function(config, name, default = NULL) {
  # under test enforcement, sparklyr.* settings must be declared in
  # spark_config_settings() (shell passthrough options are exempt)
  enforcing <- getOption("sparklyr.test.enforce.config", FALSE)
  if (enforcing && any(grepl("^sparklyr.", name))) {
    settings <- get("spark_config_settings")()
    known <- any(name %in% settings$name)
    if (!known &&
      !grepl("^sparklyr\\.shell\\.", name)) {
      stop("Config value '", name[[1]], "' not described in spark_config_settings()")
    }
  }

  # first matching candidate name wins: config entry, then option, then default
  in_config <- name %in% names(config)
  if (any(in_config)) {
    value <- config[[name[in_config][[1]]]]
  } else {
    in_options <- name %in% names(options())
    value <- if (any(in_options)) getOption(name[in_options][[1]]) else default
  }

  if (is.language(value)) value <- rlang::as_closure(value)
  if (is.function(value)) value <- value()
  value
}
# Retrieve a config value coerced to integer (see spark_config_value).
spark_config_integer <- function(config, name, default = NULL) {
  as.integer(spark_config_value(config, name, default))
}
# Retrieve a config value coerced to logical (see spark_config_value).
spark_config_logical <- function(config, name, default = NULL) {
  as.logical(spark_config_value(config, name, default))
}
#' Check whether the connection is open
#'
#' @param sc \code{spark_connection}
#'
#' @return Implementations return \code{TRUE} when the connection is open.
#'
#' @keywords internal
#'
#' @export
connection_is_open <- function(sc) {
  UseMethod("connection_is_open")
}
# S3 generic wrapping readBin(); dispatches on the connection type so Spark
# connections can add waiting/progress behavior around raw reads.
read_bin <- function(con, what, n, endian = NULL) {
  UseMethod("read_bin")
}
# Default reader: a thin wrapper over readBin() that only forwards `endian`
# when one was supplied.
read_bin.default <- function(con, what, n, endian = NULL) {
  if (is.null(endian)) {
    readBin(con, what, n)
  } else {
    readBin(con, what, n, endian = endian)
  }
}
# Blocking read with progress reporting: polls readBin() until data arrives,
# periodically emitting Spark job progress, and fails once
# sparklyr.backend.timeout (default 30 days) elapses with no data.
read_bin_wait <- function(con, what, n, endian = NULL) {
  sc <- con
  # monitored connections read from the dedicated monitoring socket
  con <- if (!is.null(sc$state) && identical(sc$state$use_monitoring, TRUE)) sc$monitoring else sc$backend

  timeout <- spark_config_value(sc$config, "sparklyr.backend.timeout", 30 * 24 * 60 * 60)
  progressInterval <- spark_config_value(sc$config, "sparklyr.progress.interval", 3)

  result <- if (is.null(endian)) readBin(con, what, n) else readBin(con, what, n, endian = endian)

  progressTimeout <- Sys.time() + progressInterval
  if (is.null(sc$state$progress)) {
    sc$state$progress <- new.env()
  }

  progressUpdated <- FALSE
  # back off gradually from busy-polling, up to 100ms between attempts
  waitInterval <- 0
  commandStart <- Sys.time()

  while (length(result) == 0 && commandStart + timeout > Sys.time()) {
    Sys.sleep(waitInterval)
    waitInterval <- min(0.1, waitInterval + 0.01)

    result <- if (is.null(endian)) readBin(con, what, n) else readBin(con, what, n, endian = endian)

    if (Sys.time() > progressTimeout) {
      progressTimeout <- Sys.time() + progressInterval
      # connection_progress() lives elsewhere in the package; guarded in case
      # it is unavailable in this evaluation context
      if (exists("connection_progress")) {
        connection_progress(sc)
        progressUpdated <- TRUE
      }
    }
  }

  if (progressUpdated) connection_progress_terminated(sc)

  if (commandStart + timeout <= Sys.time()) {
    stop("Operation timed out, increase config option sparklyr.backend.timeout if needed.")
  }

  result
}
# Spark connections use the waiting/progress-aware reader.
read_bin.spark_connection <- function(con, what, n, endian = NULL) {
  read_bin_wait(con, what, n, endian)
}
# Worker connections also use the waiting/progress-aware reader.
read_bin.spark_worker_connection <- function(con, what, n, endian = NULL) {
  read_bin_wait(con, what, n, endian)
}
# Livy backends read directly from their underlying raw connection.
read_bin.livy_backend <- function(con, what, n, endian = NULL) {
  read_bin.default(con$rc, what, n, endian)
}
# Read one serialized value: a one-byte type tag followed by a typed payload.
readObject <- function(con) {
  # Read type first
  type <- readType(con)
  readTypedObject(con, type)
}
# Dispatch on a one-character type tag and read the corresponding value.
# Tags mirror the backend serializer: i=int, c=string, b=boolean, d=double,
# r=raw, D=date, t=time, a=array, l=list, e=map, s=struct, f=fast string
# array, n=NULL, j=jobj reference, J=JSON payload.
readTypedObject <- function(con, type) {
  switch(type,
    "i" = readInt(con),
    "c" = readString(con),
    "b" = readBoolean(con),
    "d" = readDouble(con),
    "r" = readRaw(con),
    "D" = readDate(con),
    "t" = readTime(con),
    "a" = readArray(con),
    "l" = readList(con),
    "e" = readMap(con),
    "s" = readStruct(con),
    "f" = readFastStringArray(con),
    "n" = NULL,
    "j" = getJobj(con, readString(con)),
    "J" = jsonlite::fromJSON(
      readString(con),
      simplifyDataFrame = FALSE, simplifyMatrix = FALSE
    ),
    stop(paste("Unsupported type for deserialization", type))
  )
}
# Read a length-prefixed UTF-8 string. Embedded nul bytes (which R character
# vectors cannot represent) are dropped with a warning.
readString <- function(con) {
  nbytes <- readInt(con)
  out <- ""
  if (nbytes > 0) {
    bytes <- read_bin(con, raw(), nbytes, endian = "big")
    if (is.element("00", bytes)) {
      warning("Input contains embedded nuls, removing.")
      bytes <- bytes[bytes != "00"]
    }
    out <- rawToChar(bytes)
  }
  Encoding(out) <- "UTF-8"
  out
}
# Read a string array packed as a single string with 0x19 (EM) separators --
# cheaper on the wire than one length-prefixed string per element.
readFastStringArray <- function(con) {
  joined <- readString(con)
  as.list(strsplit(joined, "\u0019")[[1]])
}
# Read `n` dates one at a time and fold them into a Date vector.
# NOTE(review): n == 0 yields a length-1 NA Date rather than a zero-length
# Date vector -- confirm this base case is what callers expect.
readDateArray <- function(con, n = 1) {
  if (n == 0) {
    as.Date(NA)
  } else {
    do.call(c, lapply(seq(n), function(x) readDate(con)))
  }
}
# Read `n` big-endian 32-bit integers; n == 0 yields an empty integer vector.
readInt <- function(con, n = 1) {
  if (n == 0) {
    return(integer(0))
  }
  read_bin(con, integer(), n = n, endian = "big")
}
# Read `n` big-endian IEEE doubles; n == 0 yields an empty numeric vector.
readDouble <- function(con, n = 1) {
  if (n == 0) {
    return(double(0))
  }
  read_bin(con, double(), n = n, endian = "big")
}
# Read `n` booleans, transmitted on the wire as 32-bit integers (0/1).
readBoolean <- function(con, n = 1) {
  if (n == 0) {
    return(logical(0))
  }
  as.logical(readInt(con, n = n))
}
# Read the one-byte type tag as a single character.
readType <- function(con) {
  rawToChar(read_bin(con, "raw", n = 1L))
}
# Read a date transmitted as an integer count of days since 1970-01-01
# (NA_integer_ for a null date). With the "sparklyr.collect.datechars"
# option set, dates are returned as character instead of Date.
readDate <- function(con) {
  n <- readInt(con)
  if (is.na(n)) {
    as.Date(NA)
  } else {
    d <- as.Date(n, origin = "1970-01-01", tz = "UTC")
    if (getOption("sparklyr.collect.datechars", FALSE)) {
      as.character(d)
    } else {
      d
    }
  }
}
# Read `n` POSIX timestamps, transmitted as big-endian doubles of seconds
# since the UNIX epoch (UTC). With the "sparklyr.collect.datechars" option
# set, timestamps are returned as character instead of POSIXct.
readTime <- function(con, n = 1) {
  # Use a numeric comparison instead of identical(n, 0): length prefixes read
  # off the wire are integers (0L), which identical() against double 0 missed,
  # forcing an unnecessary trip through readDouble() for empty arrays.
  if (n == 0) {
    as.POSIXct(character(0))
  } else {
    t <- readDouble(con, n)
    r <- as.POSIXct(t, origin = "1970-01-01", tz = "UTC")
    if (getOption("sparklyr.collect.datechars", FALSE)) {
      as.character(r)
    } else {
      r
    }
  }
}
# Read a homogeneous array: a type tag followed by a length prefix. Primitive
# element types (double, int, boolean, time, date) are read in bulk; any
# other element type is read object-by-object into a list.
readArray <- function(con) {
  type <- readType(con)
  len <- readInt(con)
  if (type == "d") {
    return(readDouble(con, n = len))
  } else if (type == "i") {
    return(readInt(con, n = len))
  } else if (type == "b") {
    return(readBoolean(con, n = len))
  } else if (type == "t") {
    return(readTime(con, n = len))
  } else if (type == "D") {
    return(readDateArray(con, n = len))
  }
  if (len > 0) {
    l <- vector("list", len)
    for (i in seq_len(len)) {
      l[[i]] <- readTypedObject(con, type)
    }
    l
  } else {
    list()
  }
}
# Read a heterogeneous list: a length prefix followed by `len` typed objects.
# Types of each element may differ; NULL elements are mapped to NA.
readList <- function(con) {
  len <- readInt(con)
  if (len <= 0) {
    return(list())
  }
  lapply(seq_len(len), function(i) {
    elem <- readObject(con)
    if (is.null(elem)) NA else elem
  })
}
# Read a string-keyed map: a size prefix followed by alternating key/value
# pairs, returned as a named list (empty list for a zero-size map).
readMap <- function(con) {
  result <- list()
  n_entries <- readInt(con)
  if (n_entries > 0) {
    for (i in seq_len(n_entries)) {
      key <- readString(con)
      result[[key]] <- readObject(con)
    }
  }
  result
}
# Tag a named list with class "struct" so SerDe can distinguish a Spark
# struct field from an ordinary named list.
listToStruct <- function(list) {
  stopifnot(class(list) == "list")
  stopifnot(!is.null(names(list)))
  structure(list, class = "struct")
}
# Read a field of StructType from a DataFrame: a names vector followed by the
# field values, returned as a named list classed "struct".
readStruct <- function(con) {
  names <- readObject(con)
  fields <- readObject(con)
  names(fields) <- names
  listToStruct(fields)
}
# Read a length-prefixed byte array as a raw vector.
readRaw <- function(con) {
  nbytes <- readInt(con)
  if (nbytes == 0) {
    return(raw())
  }
  read_bin(con, raw(), as.integer(nbytes), endian = "big")
}
# Two-element character vector appended to gateway error messages, pointing
# users at console logging for more connection diagnostics.
sparklyr_gateway_trouble_shooting_msg <- function() {
  lead <- "\n\n\nTry running `options(sparklyr.log.console = TRUE)` followed by "
  advice <- "`sc <- spark_connect(...)` for more debugging info."
  c(lead, advice)
}
# Repeatedly attempt a socket connection to the gateway until one succeeds or
# the deadline passes (sparklyr.connect.timeout when starting a new session,
# otherwise sparklyr.gateway.timeout). Returns the open socket, or NULL when
# the gateway could not be reached in time.
wait_connect_gateway <- function(gatewayAddress, gatewayPort, config, isStarting) {
  waitSeconds <- if (isStarting) {
    spark_config_value(config, "sparklyr.connect.timeout", 60)
  } else {
    spark_config_value(config, "sparklyr.gateway.timeout", 1)
  }

  gateway <- NULL
  commandStart <- Sys.time()

  while (is.null(gateway) && Sys.time() < commandStart + waitSeconds) {
    tryCatch(
      {
        # connection failures are expected while the gateway boots; swallow
        # them and retry until the deadline
        suppressWarnings({
          timeout <- spark_config_value(config, "sparklyr.gateway.interval", 1)
          gateway <- socketConnection(
            host = gatewayAddress,
            port = gatewayPort,
            server = FALSE,
            blocking = TRUE,
            open = "rb",
            timeout = timeout
          )
        })
      },
      error = function(err) {
      }
    )

    startWait <- spark_config_value(config, "sparklyr.gateway.wait", 50 / 1000)
    Sys.sleep(startWait)
  }

  gateway
}
# Protocol command codes understood by the sparklyr gateway.
spark_gateway_commands <- function() {
  list(GetPorts = 0, RegisterInstance = 1)
}
# Send the GetPorts command for `sessionId` over an open gateway socket, poll
# for the backend session id, then read the redirect and backend ports.
# Returns a list(gateway, backendPort, redirectGatewayPort); on timeout it
# stops when starting a new session and returns NULL otherwise.
query_gateway_for_port <- function(gateway, sessionId, config, isStarting) {
  waitSeconds <- if (isStarting) {
    spark_config_value(config, "sparklyr.connect.timeout", 60)
  } else {
    spark_config_value(config, "sparklyr.gateway.timeout", 1)
  }

  # request: command code, session id, and how long the gateway may wait
  writeInt(gateway, spark_gateway_commands()[["GetPorts"]])
  writeInt(gateway, sessionId)
  writeInt(gateway, if (isStarting) waitSeconds else 0)

  backendSessionId <- NULL
  redirectGatewayPort <- NULL

  commandStart <- Sys.time()
  while (length(backendSessionId) == 0 && commandStart + waitSeconds > Sys.time()) {
    backendSessionId <- readInt(gateway)
    Sys.sleep(0.1)
  }

  redirectGatewayPort <- readInt(gateway)
  backendPort <- readInt(gateway)

  if (length(backendSessionId) == 0 || length(redirectGatewayPort) == 0 || length(backendPort) == 0) {
    if (isStarting) {
      stop(
        "Sparklyr gateway did not respond while retrieving ports information after ",
        waitSeconds,
        " seconds.",
        sparklyr_gateway_trouble_shooting_msg()
      )
    } else {
      return(NULL)
    }
  }

  list(
    gateway = gateway,
    backendPort = backendPort,
    redirectGatewayPort = redirectGatewayPort
  )
}
# Connect to the sparklyr gateway at gatewayAddress:gatewayPort and negotiate
# the backend port for `sessionId`. Recurses once when the gateway redirects
# to a session-specific port. Returns list(gateway, backendPort), or NULL
# when the gateway/session cannot be reached and we are not starting.
spark_connect_gateway <- function(
                                  gatewayAddress,
                                  gatewayPort,
                                  sessionId,
                                  config,
                                  isStarting = FALSE) {
  # try connecting to existing gateway
  gateway <- wait_connect_gateway(gatewayAddress, gatewayPort, config, isStarting)

  if (is.null(gateway)) {
    if (isStarting) {
      stop(
        "Gateway in ", gatewayAddress, ":", gatewayPort, " did not respond.",
        sparklyr_gateway_trouble_shooting_msg()
      )
    }
    NULL
  }
  else {
    worker_log("is querying ports from backend using port ", gatewayPort)

    gateway_ports_query_attempts <- as.integer(
      spark_config_value(config, "sparklyr.gateway.port.query.attempts", 3L)
    )
    gateway_ports_query_retry_interval_s <- as.integer(
      spark_config_value(config, "sparklyr.gateway.port.query.retry.interval.seconds", 4L)
    )
    # retry the ports query a few times before giving up
    # NOTE(review): withCallingHandlers' `error` handler runs but does not
    # stop the error from propagating (no restart is invoked), so a failing
    # query would escape this loop -- confirm the intended retry semantics.
    while (gateway_ports_query_attempts > 0) {
      gateway_ports_query_attempts <- gateway_ports_query_attempts - 1

      withCallingHandlers(
        {
          gatewayPortsQuery <- query_gateway_for_port(
            gateway,
            sessionId,
            config,
            isStarting
          )
          break
        },
        error = function(e) {
          isStarting <- FALSE
          if (gateway_ports_query_attempts > 0) {
            Sys.sleep(gateway_ports_query_retry_interval_s)
          }
          NULL
        }
      )
    }

    if (is.null(gatewayPortsQuery) && !isStarting) {
      close(gateway)
      return(NULL)
    }

    redirectGatewayPort <- gatewayPortsQuery$redirectGatewayPort
    backendPort <- gatewayPortsQuery$backendPort

    worker_log("found redirect gateway port ", redirectGatewayPort)

    if (redirectGatewayPort == 0) {
      # port 0 means the gateway does not know this session
      close(gateway)

      if (isStarting) {
        stop("Gateway in ", gatewayAddress, ":", gatewayPort, " does not have the requested session registered")
      }

      NULL
    } else if (redirectGatewayPort != gatewayPort) {
      # session lives behind a different port: reconnect there
      close(gateway)

      spark_connect_gateway(gatewayAddress, redirectGatewayPort, sessionId, config, isStarting)
    }
    else {
      list(
        gateway = gateway,
        backendPort = backendPort
      )
    }
  }
}
# Drain any bytes left on the backend socket so the connection is back at a
# clean protocol boundary after an interrupted exchange.
core_invoke_sync_socket <- function(sc) {
  flush <- c(1)
  while (length(flush) > 0) {
    flush <- readBin(sc$backend, raw(), 1000)

    # while flushing monitored connections we don't want to hang forever
    if (identical(sc$state$use_monitoring, TRUE)) break
  }
}
# Block until the backend echo probe succeeds, flushing stray bytes from the
# socket between attempts.
core_invoke_sync <- function(sc) {
  # sleep until connection clears is back on valid state
  while (!core_invoke_synced(sc)) {
    Sys.sleep(1)
    core_invoke_sync_socket(sc)
  }
}
# Cancel all running Spark jobs after an interrupted invocation. No-ops when
# there is no Spark context, when using the monitoring connection, or when a
# cancellation is already in flight (guards against reentrancy).
core_invoke_cancel_running <- function(sc) {
  if (is.null(spark_context(sc))) {
    return()
  }

  # if something fails while using a monitored connection we don't cancel jobs
  if (identical(sc$state$use_monitoring, TRUE)) {
    return()
  }

  # if something fails while cancelling jobs we don't cancel jobs, this can
  # happen in OutOfMemory errors that shut down the spark context
  if (identical(sc$state$cancelling_all_jobs, TRUE)) {
    return()
  }

  connection_progress_context(sc, function() {
    sc$state$cancelling_all_jobs <- TRUE
    on.exit(sc$state$cancelling_all_jobs <- FALSE)
    invoke(spark_context(sc), "cancelAllJobs")
  })

  if (exists("connection_progress_terminated")) connection_progress_terminated(sc)
}
# Serialize one backend invocation (object id, static flag, jobj-return flag,
# method name and arguments) into a single length-prefixed binary frame and
# write it to the backend socket.
write_bin_args <- function(backend, object, static, method, args, return_jobj_ref = FALSE) {
  # first pass: serialize the payload into an in-memory raw connection
  rc <- rawConnection(raw(), "r+")
  writeString(rc, object)
  writeBoolean(rc, static)
  writeBoolean(rc, return_jobj_ref)
  writeString(rc, method)

  writeInt(rc, length(args))
  writeArgs(rc, args)
  bytes <- rawConnectionValue(rc)
  close(rc)

  # second pass: prefix the payload with its byte length
  rc <- rawConnection(raw(0), "r+")
  writeInt(rc, length(bytes))
  writeBin(bytes, rc)
  con <- rawConnectionValue(rc)
  close(rc)

  writeBin(con, backend)
}
# Probe whether the backend is at a clean protocol boundary by invoking the
# Handler "echo" method and checking the echoed token comes back intact.
core_invoke_synced <- function(sc) {
  if (is.null(sc)) {
    stop("The connection is no longer valid.")
  }

  backend <- core_invoke_socket(sc)
  echo_id <- "sparklyr"

  write_bin_args(backend, "Handler", TRUE, "echo", echo_id)

  returnStatus <- readInt(backend)

  if (length(returnStatus) == 0 || returnStatus != 0) {
    FALSE
  }
  else {
    object <- readObject(sc)
    identical(object, echo_id)
  }
}
# Pick the socket for the current invocation: the monitoring connection when
# monitoring mode is active, the regular backend connection otherwise.
core_invoke_socket <- function(sc) {
  monitoring <- identical(sc$state$use_monitoring, TRUE)
  if (monitoring) sc$monitoring else sc$backend
}
# Name of the socket selected for the current invocation; mirrors
# core_invoke_socket and keys into sc$state$status.
core_invoke_socket_name <- function(sc) {
  if (identical(sc$state$use_monitoring, TRUE)) "monitoring" else "backend"
}
# Ask the backend to drop its references for the given jobj ids
# (fire-and-forget: no reply is read).
core_remove_jobjs <- function(sc, ids) {
  core_invoke_method_impl(sc, static = TRUE, noreply = TRUE, "Handler", "rm", FALSE, as.list(ids))
}
# Public entry point for backend method invocations; always waits for a reply.
core_invoke_method <- function(sc, static, object, method, return_jobj_ref, ...) {
  core_invoke_method_impl(sc, static, noreply = FALSE, object, method, return_jobj_ref, ...)
}
# Invoke a method on a backend object (or a static/Handler method). Handles
# queued jobj removals, connection status bookkeeping, cancellation of the
# server-side job if this call is interrupted, and error reporting from both
# the backend and the Spark log. Returns the deserialized result with this
# connection attached to any jobjs inside it.
core_invoke_method_impl <- function(sc, static, noreply, object, method, return_jobj_ref, ...) {
  # N.B.: the reference to `object` must be retained until after a value or exception is returned to us
  # from the invoked method here (i.e., cannot have `object <- something_else` before that), because any
  # re-assignment could cause the last reference to `object` to be destroyed and the underlying JVM object
  # to be deleted from JVMObjectTracker before the actual invocation of the method could happen.
  lockBinding("object", environment())

  if (is.null(sc)) {
    stop("The connection is no longer valid.")
  }

  args <- list(...)

  # initialize status if needed
  if (is.null(sc$state$status)) {
    sc$state$status <- list()
  }

  # choose connection socket
  backend <- core_invoke_socket(sc)
  connection_name <- core_invoke_socket_name(sc)

  # flush jobj ids queued for removal by finalizers (not done for Handler
  # calls, which core_remove_jobjs itself uses)
  if (!identical(object, "Handler")) {
    toRemoveJobjs <- get_to_remove_jobjs(sc)
    objsToRemove <- ls(toRemoveJobjs)
    if (length(objsToRemove) > 0) {
      core_remove_jobjs(sc, objsToRemove)
      rm(list = objsToRemove, envir = toRemoveJobjs)
    }
  }

  if (!identical(object, "Handler") &&
    spark_config_value(sc$config, c("sparklyr.cancellable", "sparklyr.connection.cancellable"), TRUE)) {
    # if connection still running, sync to valid state
    if (identical(sc$state$status[[connection_name]], "running")) {
      core_invoke_sync(sc)
    }

    # while exiting this function, if interrupted (still running), cancel server job
    on.exit(core_invoke_cancel_running(sc))
    sc$state$status[[connection_name]] <- "running"
  }

  # if the object is a jobj then get it's id
  # (ifelse() works here because both branches are length-1 values)
  objId <- ifelse(inherits(object, "spark_jobj"), object$id, object)

  write_bin_args(backend, objId, static, method, args, return_jobj_ref)

  if (identical(object, "Handler") &&
    (identical(method, "terminateBackend") || identical(method, "stopBackend"))) {
    # by the time we read response, backend might be already down.
    return(NULL)
  }

  result_object <- NULL
  if (!noreply) {
    # wait for a return status & result
    returnStatus <- readInt(sc)

    if (length(returnStatus) == 0) {
      # read the spark log
      msg <- core_read_spark_log_error(sc)
      # widen warning.length so long backend errors are not truncated
      withr::with_options(list(
        warning.length = 8000
      ), {
        stop(
          "Unexpected state in sparklyr backend: ",
          msg,
          call. = FALSE
        )
      })
    }

    if (returnStatus != 0) {
      # get error message from backend and report to R
      msg <- readString(sc)
      withr::with_options(list(
        warning.length = 8000
      ), {
        if (nzchar(msg)) {
          core_handle_known_errors(sc, msg)
          stop(msg, call. = FALSE)
        } else {
          # read the spark log
          msg <- core_read_spark_log_error(sc)
          stop(msg, call. = FALSE)
        }
      })
    }

    result_object <- readObject(sc)
  }

  # successful exchange: clear the running status and cancel the on-exit hook
  sc$state$status[[connection_name]] <- "ready"
  on.exit(NULL)

  attach_connection(result_object, sc)
}
# Jobjs created over the shell backend are tagged with class "shell_jobj".
jobj_subclass.shell_backend <- function(con) {
  "shell_jobj"
}
# Jobjs created over a regular spark connection use the same "shell_jobj" tag.
jobj_subclass.spark_connection <- function(con) {
  "shell_jobj"
}
# Worker connections also produce "shell_jobj"-classed references.
jobj_subclass.spark_worker_connection <- function(con) {
  "shell_jobj"
}
# Pattern-match known backend failure messages and surface friendlier
# diagnostics (bad localhost/hostname mapping; worker rscript failures when
# running against a local master).
core_handle_known_errors <- function(sc, msg) {
  # Some systems might have an invalid hostname that Spark <= 2.0.1 fails to handle
  # gracefully and triggers unexpected errors such as #532. Under these versions,
  # we proactively test getLocalHost() to warn users of this problem.
  if (grepl("ServiceConfigurationError.*tachyon", msg, ignore.case = TRUE)) {
    warning(
      "Failed to retrieve localhost, please validate that the hostname is correctly mapped. ",
      "Consider running `hostname` and adding that entry to your `/etc/hosts` file."
    )
  }
  else if (grepl("check worker logs for details", msg, ignore.case = TRUE) &&
    spark_master_is_local(sc$master)) {
    abort_shell(
      "sparklyr worker rscript failure, check worker logs for details",
      NULL, NULL, sc$output_file, sc$error_file
    )
  }
}
# Build an error message from the tail of the Spark log: take the timestamp
# of the last log line and include every line bearing that timestamp. Falls
# back to a generic message when the log cannot be read or parsed.
core_read_spark_log_error <- function(sc) {
  msg <- "failed to invoke spark command (unknown reason)"
  try(silent = TRUE, {
    log_lines <- readLines(sc$output_file)
    tokens <- strsplit(log_lines, "\\s+", perl = TRUE)
    last_timestamp <- tokens[[length(tokens)]][[2]]
    pattern <- paste0("\\b", last_timestamp, "\\b")
    matching <- grep(pattern, log_lines, perl = TRUE, value = TRUE)
    msg <- paste("failed to invoke spark command",
                 paste(matching, collapse = "\n"),
                 sep = "\n")
  })
  msg
}
#' Retrieve a Spark JVM Object Reference
#'
#' This S3 generic is used for accessing the underlying Java Virtual Machine
#' (JVM) Spark objects associated with \R objects. These objects act as
#' references to Spark objects living in the JVM. Methods on these objects
#' can be called with the \code{\link{invoke}} family of functions.
#'
#' @param x An \R object containing, or wrapping, a \code{spark_jobj}.
#' @param ... Optional arguments; currently unused.
#'
#' @return The underlying \code{spark_jobj} reference.
#'
#' @seealso \code{\link{invoke}}, for calling methods on Java object references.
#'
#' @exportClass spark_jobj
#' @export
spark_jobj <- function(x, ...) {
  UseMethod("spark_jobj")
}
# Extract the backend object id string from a jobj.
spark_jobj_id <- function(x) {
  x$id
}
#' @export
spark_jobj.default <- function(x, ...) {
  # no jobj can be extracted from an arbitrary object; report its class(es)
  classes <- paste(class(x), collapse = " ")
  stop("Unable to retrieve a spark_jobj from object of class ", classes,
       call. = FALSE)
}
#' @export
spark_jobj.spark_jobj <- function(x, ...) {
  # already a jobj: identity
  x
}
#' @export
print.spark_jobj <- function(x, ...) {
  # delegate to the connection-type-specific printer
  print_jobj(spark_connection(x), x, ...)
}
#' Generic method for print jobj for a connection type
#'
#' @param sc \code{spark_connection} (used for type dispatch)
#' @param jobj Object to print
#' @param ... Optional arguments passed on to methods
#'
#' @keywords internal
#'
#' @export
print_jobj <- function(sc, jobj, ...) {
  UseMethod("print_jobj")
}
# Lazily create and return the per-connection registry (an environment) of
# live jobj ids mapped to their reference counts.
get_valid_jobjs <- function(con) {
  registry <- con$state$validJobjs
  if (is.null(registry)) {
    registry <- new.env(parent = emptyenv())
    con$state$validJobjs <- registry
  }
  registry
}
# Lazily create and return the per-connection queue (an environment) of jobj
# ids awaiting removal on the backend during the next RPC.
get_to_remove_jobjs <- function(con) {
  queue <- con$state$toRemoveJobjs
  if (is.null(queue)) {
    queue <- new.env(parent = emptyenv())
    con$state$toRemoveJobjs <- queue
  }
  queue
}
# Check if jobj points to a valid external JVM object: it must still carry a
# connection and its id must be present in that connection's live registry.
isValidJobj <- function(jobj) {
  exists("connection", jobj) && exists(jobj$id, get_valid_jobjs(jobj$connection))
}
# Wrap a backend object id in a new jobj, bumping the per-connection
# reference count for that id (multiple jobjs may share one backend object).
getJobj <- function(con, objId) {
  newObj <- jobj_create(con, objId)
  validJobjs <- get_valid_jobjs(con)
  validJobjs[[objId]] <- get0(objId, validJobjs, ifnotfound = 0) + 1
  newObj
}
# S3 generic: connection-specific class tag appended to new jobjs.
jobj_subclass <- function(con) {
  UseMethod("jobj_subclass")
}
# Handler for a java object that exists on the backend. The returned jobj is
# an environment (so finalizers can be registered and fields mutated by
# reference) classed c("spark_jobj", <connection subclass>).
jobj_create <- function(con, objId) {
  if (!is.character(objId)) {
    stop("object id must be a character")
  }
  # NOTE: We need a new env for a jobj as we can only register
  # finalizers for environments or external references pointers.
  obj <- structure(new.env(parent = emptyenv()), class = c("spark_jobj", jobj_subclass(con)))
  obj$id <- objId

  # Register a finalizer to remove the Java object when this reference
  # is garbage collected in R
  reg.finalizer(obj, cleanup.jobj)
  obj
}
# Best-effort description of a jobj: its JVM class name and its toString()
# representation. Either field is NULL when the corresponding backend call
# fails (e.g. the connection is gone).
jobj_info <- function(jobj) {
  if (!inherits(jobj, "spark_jobj")) {
    stop("'jobj_info' called on non-jobj")
  }

  class <- NULL
  repr <- NULL

  tryCatch(
    {
      class <- invoke(jobj, "getClass")
      if (inherits(class, "spark_jobj")) {
        class <- invoke(class, "getName")
      }
    },
    error = function(e) {
    }
  )
  tryCatch(
    {
      repr <- invoke(jobj, "toString")
    },
    error = function(e) {
    }
  )

  list(
    class = class,
    repr = repr
  )
}
# Print a jobj followed by its declared JVM fields and methods (obtained via
# reflection on the backend). Returns the jobj; skips reflection when the
# connection is closed.
jobj_inspect <- function(jobj) {
  print(jobj)
  if (!connection_is_open(spark_connection(jobj))) {
    return(jobj)
  }

  class <- invoke(jobj, "getClass")

  cat("Fields:\n")
  fields <- invoke(class, "getDeclaredFields")
  lapply(fields, function(field) {
    print(field)
  })

  cat("Methods:\n")
  methods <- invoke(class, "getDeclaredMethods")
  lapply(methods, function(method) {
    print(method)
  })

  jobj
}
# Finalizer for jobj environments: decrement the reference count for the
# object's id and, when it reaches zero, queue the id for removal on the
# backend during the next RPC.
cleanup.jobj <- function(jobj) {
  if (isValidJobj(jobj)) {
    objId <- jobj$id
    validJobjs <- get_valid_jobjs(jobj$connection)
    validJobjs[[objId]] <- validJobjs[[objId]] - 1

    if (validJobjs[[objId]] == 0) {
      rm(list = objId, envir = validJobjs)
      # NOTE: We cannot call removeJObject here as the finalizer may be run
      # in the middle of another RPC. Thus we queue up this object Id to be removed
      # and then run all the removeJObject when the next RPC is called.
      toRemoveJobjs <- get_to_remove_jobjs(jobj$connection)
      toRemoveJobjs[[objId]] <- 1
    }
  }
}
# Drop every tracked jobj reference for all known Spark connections, clearing
# both the live registry and the pending-removal queue of each connection.
clear_jobjs <- function() {
  scons <- spark_connection_find()
  for (scon in scons) {
    # BUGFIX: the per-connection registries were previously fetched with the
    # whole connection list (`scons`) instead of the current connection
    # (`scon`), so the intended registries were never actually cleared.
    validJobjs <- get_valid_jobjs(scon)
    valid <- ls(validJobjs)
    rm(list = valid, envir = validJobjs)

    toRemoveJobjs <- get_to_remove_jobjs(scon)
    removeList <- ls(toRemoveJobjs)
    rm(list = removeList, envir = toRemoveJobjs)
  }
}
# Recursively attach a connection to a jobj and to every jobj nested inside
# lists, structs, or environments; other values pass through unchanged.
# (Container results follow lapply()/eapply() semantics: structs come back as
# plain lists and environments come back as named lists, as before.)
attach_connection <- function(jobj, connection) {
  recurse <- function(e) attach_connection(e, connection)
  if (inherits(jobj, "spark_jobj")) {
    jobj$connection <- connection
    jobj
  } else if (is.list(jobj) || inherits(jobj, "struct")) {
    lapply(jobj, recurse)
  } else if (is.environment(jobj)) {
    eapply(jobj, recurse)
  } else {
    jobj
  }
}
# Utility functions to serialize R objects so they can be read in Java.
# nolint start
# Type mapping from R to Java
#
# NULL -> Void
# integer -> Int
# character -> String
# logical -> Boolean
# double, numeric -> Double
# raw -> Array[Byte]
# Date -> Date
# POSIXct,POSIXlt -> Timestamp
#
# list[T] -> Array[T], where T is one of above mentioned types
# environment -> Map[String, T], where T is a native type
# jobj -> Object, where jobj is an object created in the backend
# nolint end
# Determine the serialization tag for an R value. Non-list values map to
# their first class; a list maps to "array" when every element shares a
# single serde type and the list is NA-free (scala arrays cannot hold NA),
# otherwise to the generic "list" encoding.
getSerdeType <- function(object) {
  kind <- class(object)[[1]]
  if (kind != "list") {
    return(kind)
  }
  element_types <- unique(sapply(object, getSerdeType))
  if (length(element_types) > 1) {
    return("list")
  }
  if (any(is.na(object))) "list" else "array"
}
writeObject <- function(con, object, writeType = TRUE) {
type <- class(object)[[1]]
if (type %in% c("integer", "character", "logical", "double", "numeric", "factor", "Date", "POSIXct")) {
if (is.na(object)) {
object <- NULL
type <- "NULL"
}
}
serdeType <- getSerdeType(object)
if (writeType) {
writeType(con, serdeType)
}
switch(serdeType,
NULL = writeVoid(con),
integer = writeInt(con, object),
character = writeString(con, object),
logical = writeBoolean(con, object),
double = writeDouble(con, object),
numeric = writeDouble(con, object),
raw = writeRaw(con, object),
array = writeArray(con, object),
list = writeList(con, object),
struct = writeList(con, object),
spark_jobj = writeJobj(con, object),
environment = writeEnv(con, object),
Date = writeDate(con, object),
POSIXlt = writeTime(con, object),
POSIXct = writeTime(con, object),
factor = writeFactor(con, object),
`data.frame` = writeList(con, object),
spark_apply_binary_result = writeList(con, object),
stop("Unsupported type '", serdeType, "' for serialization")
)
}
writeVoid <- function(con) {
# no value for NULL
}
writeJobj <- function(con, value) {
if (!isValidJobj(value)) {
stop("invalid jobj ", value$id)
}
writeString(con, value$id)
}
writeString <- function(con, value) {
utfVal <- enc2utf8(as.character(value))
writeInt(con, as.integer(nchar(utfVal, type = "bytes") + 1))
writeBin(utfVal, con, endian = "big", useBytes = TRUE)
}
writeInt <- function(con, value) {
writeBin(as.integer(value), con, endian = "big")
}
writeDouble <- function(con, value) {
writeBin(value, con, endian = "big")
}
writeBoolean <- function(con, value) {
# TRUE becomes 1, FALSE becomes 0
writeInt(con, as.integer(value))
}
writeRaw <- function(con, batch) {
writeInt(con, length(batch))
writeBin(batch, con, endian = "big")
}
writeType <- function(con, class) {
type <- switch(class,
NULL = "n",
integer = "i",
character = "c",
logical = "b",
double = "d",
numeric = "d",
raw = "r",
array = "a",
list = "l",
struct = "s",
spark_jobj = "j",
environment = "e",
Date = "D",
POSIXlt = "t",
POSIXct = "t",
factor = "c",
`data.frame` = "l",
spark_apply_binary_result = "l",
stop("Unsupported type '", class, "' for serialization")
)
writeBin(charToRaw(type), con)
}
# Used to pass arrays where all the elements are of the same type
writeArray <- function(con, arr) {
# TODO: Empty lists are given type "character" right now.
# This may not work if the Java side expects array of any other type.
if (length(arr) == 0) {
elemType <- class("somestring")
} else {
elemType <- getSerdeType(arr[[1]])
}
writeType(con, elemType)
writeInt(con, length(arr))
if (length(arr) > 0) {
for (a in arr) {
writeObject(con, a, FALSE)
}
}
}
# Used to pass arrays where the elements can be of different types
writeList <- function(con, list) {
writeInt(con, length(list))
for (elem in list) {
writeObject(con, elem)
}
}
# Used to pass in hash maps required on Java side.
writeEnv <- function(con, env) {
len <- length(env)
writeInt(con, len)
if (len > 0) {
writeArray(con, as.list(ls(env)))
vals <- lapply(ls(env), function(x) {
env[[x]]
})
writeList(con, as.list(vals))
}
}
writeDate <- function(con, date) {
writeInt(con, as.integer(date))
}
writeTime <- function(con, time) {
writeDouble(con, as.double(time))
}
writeFactor <- function(con, factor) {
writeString(con, as.character(factor))
}
# Used to serialize in a list of objects where each
# object can be of a different type. Serialization format is
# <object type> <object> for each object
writeArgs <- function(con, args) {
if (length(args) > 0) {
for (a in args) {
writeObject(con, a)
}
}
}
core_get_package_function <- function(packageName, functionName) {
if (packageName %in% rownames(installed.packages()) &&
exists(functionName, envir = asNamespace(packageName))) {
get(functionName, envir = asNamespace(packageName))
} else {
NULL
}
}
worker_config_serialize <- function(config) {
paste(
if (isTRUE(config$debug)) "TRUE" else "FALSE",
spark_config_value(config, "sparklyr.worker.gateway.port", "8880"),
spark_config_value(config, "sparklyr.worker.gateway.address", "localhost"),
if (isTRUE(config$profile)) "TRUE" else "FALSE",
if (isTRUE(config$schema)) "TRUE" else "FALSE",
if (isTRUE(config$arrow)) "TRUE" else "FALSE",
if (isTRUE(config$fetch_result_as_sdf)) "TRUE" else "FALSE",
if (isTRUE(config$single_binary_column)) "TRUE" else "FALSE",
config$spark_version,
sep = ";"
)
}
worker_config_deserialize <- function(raw) {
parts <- strsplit(raw, ";")[[1]]
list(
debug = as.logical(parts[[1]]),
sparklyr.gateway.port = as.integer(parts[[2]]),
sparklyr.gateway.address = parts[[3]],
profile = as.logical(parts[[4]]),
schema = as.logical(parts[[5]]),
arrow = as.logical(parts[[6]]),
fetch_result_as_sdf = as.logical(parts[[7]]),
single_binary_column = as.logical(parts[[8]]),
spark_version = parts[[9]]
)
}
# nocov start
spark_worker_context <- function(sc) {
hostContextId <- worker_invoke_method(sc, FALSE, "Handler", "getHostContext")
worker_log("retrieved worker context id ", hostContextId)
context <- structure(
class = c("spark_jobj", "shell_jobj"),
list(
id = hostContextId,
connection = sc
)
)
worker_log("retrieved worker context")
context
}
spark_worker_init_packages <- function(sc, context) {
bundlePath <- worker_invoke(context, "getBundlePath")
if (nchar(bundlePath) > 0) {
bundleName <- basename(bundlePath)
worker_log("using bundle name ", bundleName)
workerRootDir <- worker_invoke_static(sc, "org.apache.spark.SparkFiles", "getRootDirectory")
sparkBundlePath <- file.path(workerRootDir, bundleName)
worker_log("using bundle path ", normalizePath(sparkBundlePath))
if (!file.exists(sparkBundlePath)) {
stop("failed to find bundle under SparkFiles root directory")
}
unbundlePath <- worker_spark_apply_unbundle(
sparkBundlePath,
workerRootDir,
tools::file_path_sans_ext(bundleName)
)
.libPaths(unbundlePath)
worker_log("updated .libPaths with bundle packages")
}
else {
spark_env <- worker_invoke_static(sc, "org.apache.spark.SparkEnv", "get")
spark_libpaths <- worker_invoke(worker_invoke(spark_env, "conf"), "get", "spark.r.libpaths", NULL)
if (!is.null(spark_libpaths)) {
spark_libpaths <- unlist(strsplit(spark_libpaths, split = ","))
.libPaths(spark_libpaths)
}
}
}
spark_worker_execute_closure <- function(
sc,
closure,
df,
funcContext,
grouped_by,
barrier_map,
fetch_result_as_sdf,
partition_index) {
if (nrow(df) == 0) {
worker_log("found that source has no rows to be proceesed")
return(NULL)
}
barrier_arg <- NULL
worker_log("barrier is ", as.character(barrier_map))
if (length(barrier_map) > 0) {
worker_log("found barrier execution context")
barrier_arg <- list(barrier = barrier_map)
}
closure_params <- length(formals(closure))
has_partition_index_param <- (
!is.null(funcContext$partition_index_param) &&
nchar(funcContext$partition_index_param) > 0
)
if (has_partition_index_param) closure_params <- closure_params - 1
closure_args <- c(
list(df),
if (!is.null(funcContext$user_context)) list(funcContext$user_context) else NULL,
lapply(grouped_by, function(group_by_name) df[[group_by_name]][[1]]),
barrier_arg
)[0:closure_params]
if (has_partition_index_param) {
closure_args[[funcContext$partition_index_param]] <- partition_index
}
worker_log("computing closure")
result <- do.call(closure, closure_args)
worker_log("computed closure")
as_factors <- getOption("stringsAsFactors")
on.exit(options(stringsAsFactors = as_factors))
options(stringsAsFactors = FALSE)
if (identical(fetch_result_as_sdf, FALSE)) {
serialize_impl <- spark_worker_get_serializer(sc)
result <- lapply(result, function(x) serialize_impl(x, NULL))
class(result) <- c("spark_apply_binary_result", class(result))
result <- tibble::tibble(spark_apply_binary_result = result)
}
if (!"data.frame" %in% class(result)) {
worker_log("data.frame expected but ", class(result), " found")
result <- as.data.frame(result)
}
result
}
spark_worker_clean_factors <- function(result) {
if (any(sapply(result, is.factor))) {
result <- as.data.frame(lapply(result, function(x) if (is.factor(x)) as.character(x) else x), stringsAsFactors = FALSE)
}
result
}
spark_worker_maybe_serialize_list_cols_as_json <- function(config, result) {
if (identical(config$fetch_result_as_sdf, TRUE) &&
config$spark_version >= "2.4.0" &&
any(sapply(result, is.list))) {
result <- do.call(
tibble::tibble,
lapply(
result,
function(x) {
if (is.list(x)) {
x <- sapply(
x,
function(e) jsonlite::toJSON(e, auto_unbox = TRUE, digits = NA)
)
class(x) <- c(class(x), "list_col_as_json")
}
x
}
)
)
}
result
}
spark_worker_apply_maybe_schema <- function(config, result) {
if (identical(config$schema, TRUE)) {
worker_log("updating schema")
col_names <- colnames(result)
types <- list()
json_cols <- list()
for (i in seq_along(result)) {
if ("list_col_as_json" %in% class(result[[i]])) {
json_cols <- append(json_cols, col_names[[i]])
types <- append(types, "character")
} else {
types <- append(types, class(result[[i]])[[1]])
}
}
result <- data.frame(
names = paste(col_names, collapse = "|"),
types = paste(types, collapse = "|"),
json_cols = paste(json_cols, collapse = "|"),
stringsAsFactors = FALSE
)
}
result
}
spark_worker_build_types <- function(context, columns) {
names <- names(columns)
sqlutils <- worker_invoke(context, "getSqlUtils")
fields <- worker_invoke(
sqlutils,
"createStructFields",
lapply(
names,
function(name) {
list(name, columns[[name]][[1]], TRUE)
}
)
)
worker_invoke(sqlutils, "createStructType", fields)
}
spark_worker_get_group_batch <- function(batch) {
worker_invoke(
batch, "get", 0L
)
}
spark_worker_add_group_by_column <- function(df, result, grouped, grouped_by) {
if (grouped) {
if (nrow(result) > 0) {
new_column_values <- lapply(grouped_by, function(grouped_by_name) df[[grouped_by_name]][[1]])
names(new_column_values) <- grouped_by
if ("AsIs" %in% class(result)) class(result) <- class(result)[-match("AsIs", class(result))]
result <- do.call("cbind", list(new_column_values, result))
names(result) <- gsub("\\.", "_", make.unique(names(result)))
}
else {
result <- NULL
}
}
result
}
get_arrow_converters <- function(context, config) {
if (config$spark_version < "2.3.0") {
stop("ArrowConverters is only supported for Spark 2.3 or above.")
}
worker_invoke(context, "getArrowConverters")
}
get_arrow_converters_impl <- function(context, config) {
if (config$spark_version < "2.3.0") {
stop("ArrowConverters is only supported for Spark 2.3 or above.")
}
worker_invoke(context, "getArrowConvertersImpl")
}
spark_worker_apply_arrow <- function(sc, config) {
worker_log("using arrow serializer")
context <- spark_worker_context(sc)
spark_worker_init_packages(sc, context)
deserialize_impl <- spark_worker_get_deserializer(sc)
closure <- deserialize_impl(worker_invoke(context, "getClosure"))
funcContext <- deserialize_impl(worker_invoke(context, "getContext"))
grouped_by <- worker_invoke(context, "getGroupBy")
grouped <- !is.null(grouped_by) && length(grouped_by) > 0
columnNames <- worker_invoke(context, "getColumns")
schema_input <- worker_invoke(context, "getSchema")
time_zone <- worker_invoke(context, "getTimeZoneId")
options_map <- worker_invoke(context, "getOptions")
barrier_map <- as.list(worker_invoke(context, "getBarrier"))
partition_index <- worker_invoke(context, "getPartitionIndex")
if (grouped) {
record_batch_raw_groups <- worker_invoke(context, "getSourceArray")
record_batch_raw_groups_idx <- 1
record_batch_raw <- spark_worker_get_group_batch(record_batch_raw_groups[[record_batch_raw_groups_idx]])
} else {
row_iterator <- worker_invoke(context, "getIterator")
arrow_converters_impl <- get_arrow_converters_impl(context, config)
record_batch_raw <- worker_invoke(
arrow_converters_impl,
"toBatchArray",
row_iterator,
schema_input,
time_zone,
as.integer(options_map[["maxRecordsPerBatch"]])
)
}
reader <- arrow_record_stream_reader(record_batch_raw)
record_entry <- arrow_read_record_batch(reader)
all_batches <- list()
total_rows <- 0
schema_output <- NULL
batch_idx <- 0
while (!is.null(record_entry)) {
batch_idx <- batch_idx + 1
worker_log("is processing batch ", batch_idx)
df <- arrow_as_tibble(record_entry)
result <- NULL
if (!is.null(df)) {
colnames(df) <- columnNames[seq_along(colnames(df))]
result <- spark_worker_execute_closure(
sc,
closure,
df,
funcContext,
grouped_by,
barrier_map,
config$fetch_result_as_sdf,
partition_index
)
result <- spark_worker_add_group_by_column(df, result, grouped, grouped_by)
result <- spark_worker_clean_factors(result)
result <- spark_worker_maybe_serialize_list_cols_as_json(config, result)
result <- spark_worker_apply_maybe_schema(config, result)
}
if (!is.null(result)) {
if (is.null(schema_output)) {
schema_output <- spark_worker_build_types(context, lapply(result, class))
}
raw_batch <- arrow_write_record_batch(result, config$spark_version)
all_batches[[length(all_batches) + 1]] <- raw_batch
total_rows <- total_rows + nrow(result)
}
record_entry <- arrow_read_record_batch(reader)
if (grouped && is.null(record_entry) && record_batch_raw_groups_idx < length(record_batch_raw_groups)) {
record_batch_raw_groups_idx <- record_batch_raw_groups_idx + 1
record_batch_raw <- spark_worker_get_group_batch(record_batch_raw_groups[[record_batch_raw_groups_idx]])
reader <- arrow_record_stream_reader(record_batch_raw)
record_entry <- arrow_read_record_batch(reader)
}
}
if (length(all_batches) > 0) {
worker_log("updating ", total_rows, " rows using ", length(all_batches), " row batches")
arrow_converters <- get_arrow_converters(context, config)
row_iter <- worker_invoke(arrow_converters, "fromPayloadArray", all_batches, schema_output)
worker_invoke(context, "setResultIter", row_iter)
worker_log("updated ", total_rows, " rows using ", length(all_batches), " row batches")
} else {
worker_log("found no rows in closure result")
}
worker_log("finished apply")
}
spark_worker_get_serializer <- function(sc) {
serializer <- unserialize(worker_invoke(spark_worker_context(sc), "getSerializer"))
if (is.list(serializer)) {
function(x, ...) serializer$serializer(x)
} else {
serializer
}
}
spark_worker_get_deserializer <- function(sc) {
unserialize(worker_invoke(spark_worker_context(sc), "getDeserializer"))
}
spark_worker_apply <- function(sc, config) {
context <- spark_worker_context(sc)
spark_worker_init_packages(sc, context)
grouped_by <- worker_invoke(context, "getGroupBy")
grouped <- !is.null(grouped_by) && length(grouped_by) > 0
if (grouped) worker_log("working over grouped data")
length <- worker_invoke(context, "getSourceArrayLength")
worker_log("found ", length, " rows")
groups <- worker_invoke(context, if (grouped) "getSourceArrayGroupedSeq" else "getSourceArraySeq")
worker_log("retrieved ", length(groups), " rows")
deserialize_impl <- spark_worker_get_deserializer(sc)
closureRaw <- worker_invoke(context, "getClosure")
closure <- deserialize_impl(closureRaw)
funcContextRaw <- worker_invoke(context, "getContext")
funcContext <- deserialize_impl(funcContextRaw)
closureRLangRaw <- worker_invoke(context, "getClosureRLang")
if (length(closureRLangRaw) > 0) {
worker_log("found rlang closure")
closureRLang <- spark_worker_rlang_unserialize()
if (!is.null(closureRLang)) {
closure <- closureRLang(closureRLangRaw)
worker_log("created rlang closure")
}
}
if (identical(config$schema, TRUE)) {
worker_log("is running to compute schema")
}
columnNames <- worker_invoke(context, "getColumns")
barrier_map <- as.list(worker_invoke(context, "getBarrier"))
partition_index <- worker_invoke(context, "getPartitionIndex")
if (!grouped) groups <- list(list(groups))
all_results <- NULL
for (group_entry in groups) {
# serialized groups are wrapped over single lists
data <- group_entry[[1]]
df <- (
if (config$single_binary_column) {
tibble::tibble(encoded = lapply(data, function(x) x[[1]]))
} else {
bind_rows <- core_get_package_function("dplyr", "bind_rows")
as_tibble <- core_get_package_function("tibble", "as_tibble")
if (!is.null(bind_rows) && !is.null(as_tibble)) {
do.call(
bind_rows,
lapply(
data, function(x) { as_tibble(x, .name_repair = "universal") }
)
)
} else {
warning("dplyr::bind_rows or tibble::as_tibble is unavailable, ",
"falling back to rbind implementation in base R. ",
"Inputs with list column(s) will not work.")
do.call(rbind.data.frame, c(data, list(stringsAsFactors = FALSE)))
}
})
if (!config$single_binary_column) {
# rbind removes Date classes so we re-assign them here
if (length(data) > 0 && ncol(df) > 0 && nrow(df) > 0) {
if (any(sapply(data[[1]], function(e) class(e)[[1]]) %in% c("Date", "POSIXct"))) {
first_row <- data[[1]]
for (idx in seq_along(first_row)) {
first_class <- class(first_row[[idx]])[[1]]
if (identical(first_class, "Date")) {
df[[idx]] <- as.Date(df[[idx]], origin = "1970-01-01")
} else if (identical(first_class, "POSIXct")) {
df[[idx]] <- as.POSIXct(df[[idx]], origin = "1970-01-01")
}
}
}
# cast column to correct type, for instance, when dealing with NAs.
for (i in seq_along(df)) {
target_type <- funcContext$column_types[[i]]
if (!is.null(target_type) && class(df[[i]]) != target_type) {
df[[i]] <- do.call(paste("as", target_type, sep = "."), args = list(df[[i]]))
}
}
}
}
colnames(df) <- columnNames[seq_along(colnames(df))]
result <- spark_worker_execute_closure(
sc,
closure,
df,
funcContext,
grouped_by,
barrier_map,
config$fetch_result_as_sdf,
partition_index
)
result <- spark_worker_add_group_by_column(df, result, grouped, grouped_by)
result <- spark_worker_clean_factors(result)
result <- spark_worker_maybe_serialize_list_cols_as_json(config, result)
result <- spark_worker_apply_maybe_schema(config, result)
all_results <- rbind(all_results, result)
}
if (!is.null(all_results) && nrow(all_results) > 0) {
worker_log("updating ", nrow(all_results), " rows")
all_data <- lapply(seq_len(nrow(all_results)), function(i) as.list(all_results[i, ]))
worker_invoke(context, "setResultArraySeq", all_data)
worker_log("updated ", nrow(all_results), " rows")
} else {
worker_log("found no rows in closure result")
}
worker_log("finished apply")
}
spark_worker_rlang_unserialize <- function() {
rlang_unserialize <- core_get_package_function("rlang", "bytes_unserialise")
if (is.null(rlang_unserialize)) {
core_get_package_function("rlanglabs", "bytes_unserialise")
} else {
rlang_unserialize
}
}
spark_worker_unbundle_path <- function() {
file.path("sparklyr-bundle")
}
#' Extracts a bundle of dependencies required by \code{spark_apply()}
#'
#' @param bundle_path Path to the bundle created using \code{spark_apply_bundle()}
#' @param base_path Base path to use while extracting bundles
#'
#' @keywords internal
#' @export
worker_spark_apply_unbundle <- function(bundle_path, base_path, bundle_name) {
extractPath <- file.path(base_path, spark_worker_unbundle_path(), bundle_name)
lockFile <- file.path(extractPath, "sparklyr.lock")
if (!dir.exists(extractPath)) dir.create(extractPath, recursive = TRUE)
if (length(dir(extractPath)) == 0) {
worker_log("found that the unbundle path is empty, extracting:", extractPath)
writeLines("", lockFile)
system2("tar", c("-xf", bundle_path, "-C", extractPath))
unlink(lockFile)
}
if (file.exists(lockFile)) {
worker_log("found that lock file exists, waiting")
while (file.exists(lockFile)) {
Sys.sleep(1)
}
worker_log("completed lock file wait")
}
extractPath
}
# nocov end
# nocov start
spark_worker_connect <- function(
sessionId,
backendPort = 8880,
config = list()) {
gatewayPort <- spark_config_value(config, "sparklyr.worker.gateway.port", backendPort)
gatewayAddress <- spark_config_value(config, "sparklyr.worker.gateway.address", "localhost")
config <- list()
worker_log("is connecting to backend using port ", gatewayPort)
gatewayInfo <- spark_connect_gateway(gatewayAddress,
gatewayPort,
sessionId,
config = config,
isStarting = TRUE
)
worker_log("is connected to backend")
worker_log("is connecting to backend session")
tryCatch(
{
interval <- spark_config_value(config, "sparklyr.backend.interval", 1)
backend <- socketConnection(
host = "localhost",
port = gatewayInfo$backendPort,
server = FALSE,
blocking = interval > 0,
open = "wb",
timeout = interval
)
class(backend) <- c(class(backend), "shell_backend")
},
error = function(err) {
close(gatewayInfo$gateway)
stop(
"Failed to open connection to backend:", err$message
)
}
)
worker_log("is connected to backend session")
sc <- structure(class = c("spark_worker_connection"), list(
# spark_connection
master = "",
method = "shell",
app_name = NULL,
config = NULL,
state = new.env(),
# spark_shell_connection
spark_home = NULL,
backend = backend,
gateway = gatewayInfo$gateway,
output_file = NULL
))
worker_log("created connection")
sc
}
# nocov end
# nocov start
connection_is_open.spark_worker_connection <- function(sc) {
bothOpen <- FALSE
if (!identical(sc, NULL)) {
tryCatch(
{
bothOpen <- isOpen(sc$backend) && isOpen(sc$gateway)
},
error = function(e) {
}
)
}
bothOpen
}
worker_connection <- function(x, ...) {
UseMethod("worker_connection")
}
worker_connection.spark_jobj <- function(x, ...) {
x$connection
}
# nocov end
# nocov start
worker_invoke_method <- function(sc, static, object, method, ...) {
core_invoke_method(sc, static, object, method, FALSE, ...)
}
worker_invoke <- function(jobj, method, ...) {
UseMethod("worker_invoke")
}
worker_invoke.shell_jobj <- function(jobj, method, ...) {
worker_invoke_method(worker_connection(jobj), FALSE, jobj, method, ...)
}
worker_invoke_static <- function(sc, class, method, ...) {
worker_invoke_method(sc, TRUE, class, method, ...)
}
worker_invoke_new <- function(sc, class, ...) {
worker_invoke_method(sc, TRUE, class, "<init>", ...)
}
# nocov end
# nocov start
worker_log_env <- new.env()
worker_log_session <- function(sessionId) {
assign("sessionId", sessionId, envir = worker_log_env)
}
worker_log_format <- function(message, session, level = "INFO", component = "RScript") {
paste(
format(Sys.time(), "%y/%m/%d %H:%M:%S"),
" ",
level,
" sparklyr: ",
component,
" (",
session,
") ",
message,
sep = ""
)
}
worker_log_level <- function(..., level, component = "RScript") {
if (is.null(worker_log_env$sessionId)) {
worker_log_env <- get0("worker_log_env", envir = .GlobalEnv)
if (is.null(worker_log_env$sessionId)) {
return()
}
}
args <- list(...)
message <- paste(args, sep = "", collapse = "")
formatted <- worker_log_format(message, worker_log_env$sessionId,
level = level, component = component
)
cat(formatted, "\n")
}
worker_log <- function(...) {
worker_log_level(..., level = "INFO")
}
worker_log_warning <- function(...) {
worker_log_level(..., level = "WARN")
}
worker_log_error <- function(...) {
worker_log_level(..., level = "ERROR")
}
# nocov end
# nocov start
.worker_globals <- new.env(parent = emptyenv())
spark_worker_main <- function(
sessionId,
backendPort = 8880,
configRaw = NULL) {
spark_worker_hooks()
tryCatch(
{
worker_log_session(sessionId)
if (is.null(configRaw)) configRaw <- worker_config_serialize(list())
config <- worker_config_deserialize(configRaw)
if (identical(config$profile, TRUE)) {
profile_name <- paste("spark-apply-", as.numeric(Sys.time()), ".Rprof", sep = "")
worker_log("starting new profile in ", file.path(getwd(), profile_name))
utils::Rprof(profile_name)
}
if (config$debug) {
worker_log("exiting to wait for debugging session to attach")
# sleep for 1 day to allow long debugging sessions
Sys.sleep(60 * 60 * 24)
return()
}
worker_log("is starting")
options(sparklyr.connection.cancellable = FALSE)
sc <- spark_worker_connect(sessionId, backendPort, config)
worker_log("is connected")
if (config$arrow) {
spark_worker_apply_arrow(sc, config)
}
else {
spark_worker_apply(sc, config)
}
if (identical(config$profile, TRUE)) {
# utils::Rprof(NULL)
worker_log("closing profile")
}
},
error = function(e) {
worker_log_error("terminated unexpectedly: ", e$message)
if (exists(".stopLastError", envir = .worker_globals)) {
worker_log_error("collected callstack: \n", get(".stopLastError", envir = .worker_globals))
}
quit(status = -1)
}
)
worker_log("finished")
}
spark_worker_hooks <- function() {
unlock <- get("unlockBinding")
lock <- get("lockBinding")
originalStop <- stop
unlock("stop", as.environment("package:base"))
assign("stop", function(...) {
frame_names <- list()
frame_start <- max(1, sys.nframe() - 5)
for (i in frame_start:sys.nframe()) {
current_call <- sys.call(i)
frame_names[[1 + i - frame_start]] <- paste(i, ": ", paste(head(deparse(current_call), 5), collapse = "\n"), sep = "")
}
assign(".stopLastError", paste(rev(frame_names), collapse = "\n"), envir = .worker_globals)
originalStop(...)
}, as.environment("package:base"))
lock("stop", as.environment("package:base"))
}
# nocov end
do.call(spark_worker_main, as.list(commandArgs(trailingOnly = TRUE)))
| /java/embedded_sources.R | permissive | lresende/sparklyr | R | false | false | 55,069 | r | # Changing this file requires running update_embedded_sources.R to rebuild sources and jars.
# Serialize a data.frame into a raw vector containing an Arrow IPC stream.
#
# df                   - the data.frame to serialize
# spark_version_number - optional Spark version string; used to decide whether
#                        the legacy (pre-0.15) Arrow IPC format must be emitted
#
# Returns a raw vector with the Arrow-encoded data.
arrow_write_record_batch <- function(df, spark_version_number = NULL) {
  arrow_env_vars <- list()
  if (!is.null(spark_version_number) && spark_version_number < "3.0") {
    # Spark < 3 uses an old version of Arrow, so send data in the legacy format
    arrow_env_vars$ARROW_PRE_0_15_IPC_FORMAT <- 1
  }
  # Env var is only set for the duration of the write.
  withr::with_envvar(arrow_env_vars, {
    # New in arrow 0.17: takes a data.frame and returns a raw buffer with Arrow data
    if ("write_to_raw" %in% ls(envir = asNamespace("arrow"))) {
      # Fixed in 0.17: arrow doesn't hardcode a GMT timezone anymore
      # so set the local timezone to any POSIXt columns that don't have one set
      # https://github.com/sparklyr/sparklyr/issues/2439
      df[] <- lapply(df, function(x) {
        if (inherits(x, "POSIXt") && is.null(attr(x, "tzone"))) {
          attr(x, "tzone") <- Sys.timezone()
        }
        x
      })
      arrow::write_to_raw(df, format = "stream")
    } else {
      # Older arrow (< 0.17): build a record batch and serialize it manually.
      arrow::write_arrow(arrow::record_batch(!!!df), raw())
    }
  })
}
# Create an Arrow RecordBatchStreamReader over a buffer of Arrow IPC data.
arrow_record_stream_reader <- function(stream) {
  arrow::RecordBatchStreamReader$create(stream)
}
# Pull the next record batch from the reader; callers loop until the result
# is NULL (see spark_worker_apply_arrow), which marks end of stream.
arrow_read_record_batch <- function(reader) reader$read_next_batch()
# Convert an Arrow record batch into a data.frame.
arrow_as_tibble <- function(record) as.data.frame(record)
#' A helper function to retrieve values from \code{spark_config()}
#'
#' @param config The configuration list from \code{spark_config()}
#' @param name The name of the configuration entry
#' @param default The default value to use when entry is not present
#'
#' @keywords internal
#' @export
spark_config_value <- function(config, name, default = NULL) {
  # When config enforcement is enabled (test mode), fail on sparklyr.* names
  # not declared in spark_config_settings(), except shell passthrough options.
  if (getOption("sparklyr.test.enforce.config", FALSE) && any(grepl("^sparklyr.", name))) {
    settings <- get("spark_config_settings")()
    if (!any(name %in% settings$name) &&
      !grepl("^sparklyr\\.shell\\.", name)) {
      stop("Config value '", name[[1]], "' not described in spark_config_settings()")
    }
  }
  # Resolution precedence: explicit config entry, then R option, then the
  # supplied default. `name` may be a vector of aliases; first match wins.
  in_config <- name %in% names(config)
  if (any(in_config)) {
    value <- config[[name[in_config][[1]]]]
  } else {
    in_options <- name %in% names(options())
    value <- if (any(in_options)) getOption(name[in_options][[1]]) else default
  }
  # Quoted expressions become closures, and closures/functions are invoked,
  # so callers always receive a plain value.
  if (is.language(value)) value <- rlang::as_closure(value)
  if (is.function(value)) value <- value()
  value
}
# Resolve a config entry and coerce the result to integer.
spark_config_integer <- function(config, name, default = NULL) {
  resolved <- spark_config_value(config, name, default)
  as.integer(resolved)
}
# Resolve a config entry and coerce the result to logical.
spark_config_logical <- function(config, name, default = NULL) {
  resolved <- spark_config_value(config, name, default)
  as.logical(resolved)
}
#' Check whether the connection is open
#'
#' @param sc \code{spark_connection}
#'
#' @return A logical scalar.
#'
#' @keywords internal
#'
#' @export
connection_is_open <- function(sc) {
  # S3 generic; methods are provided per connection class
  # (e.g. connection_is_open.spark_worker_connection).
  UseMethod("connection_is_open")
}
# S3 generic for reading binary data from a sparklyr connection. Dispatch on
# the connection class selects between a plain readBin() and the
# polling/timeout behavior of read_bin_wait().
read_bin <- function(con, what, n, endian = NULL) {
  UseMethod("read_bin")
}
# Plain connections: delegate straight to base readBin(), forwarding the
# endianness argument only when one was supplied.
read_bin.default <- function(con, what, n, endian = NULL) {
  if (is.null(endian)) {
    readBin(con, what, n)
  } else {
    readBin(con, what, n, endian = endian)
  }
}
# Read binary data from a Spark backend connection, polling until data
# arrives or a timeout elapses.
#
# Polls with a sleep that ramps up to 0.1s per iteration, emitting progress
# updates every `sparklyr.progress.interval` seconds (default 3) and giving
# up after `sparklyr.backend.timeout` seconds (default 30 days).
read_bin_wait <- function(con, what, n, endian = NULL) {
  sc <- con
  # Use the dedicated monitoring socket when enabled on the session state,
  # otherwise read from the main backend socket.
  con <- if (!is.null(sc$state) && identical(sc$state$use_monitoring, TRUE)) sc$monitoring else sc$backend
  timeout <- spark_config_value(sc$config, "sparklyr.backend.timeout", 30 * 24 * 60 * 60)
  progressInterval <- spark_config_value(sc$config, "sparklyr.progress.interval", 3)
  # First read attempt; an empty result means no data is available yet.
  result <- if (is.null(endian)) readBin(con, what, n) else readBin(con, what, n, endian = endian)
  progressTimeout <- Sys.time() + progressInterval
  if (is.null(sc$state$progress)) {
    sc$state$progress <- new.env()
  }
  progressUpdated <- FALSE
  waitInterval <- 0
  commandStart <- Sys.time()
  while (length(result) == 0 && commandStart + timeout > Sys.time()) {
    # Back off gradually: sleep grows by 10ms per iteration, capped at 100ms.
    Sys.sleep(waitInterval)
    waitInterval <- min(0.1, waitInterval + 0.01)
    result <- if (is.null(endian)) readBin(con, what, n) else readBin(con, what, n, endian = endian)
    if (Sys.time() > progressTimeout) {
      progressTimeout <- Sys.time() + progressInterval
      # connection_progress() may not exist in all contexts (e.g. workers);
      # only report progress when it does.
      if (exists("connection_progress")) {
        connection_progress(sc)
        progressUpdated <- TRUE
      }
    }
  }
  # Close out the progress display if we ever reported progress.
  if (progressUpdated) connection_progress_terminated(sc)
  if (commandStart + timeout <= Sys.time()) {
    stop("Operation timed out, increase config option sparklyr.backend.timeout if needed.")
  }
  result
}
# Shell/backend connections read with polling, progress and timeout handling.
read_bin.spark_connection <- function(con, what, n, endian = NULL) {
  read_bin_wait(con, what, n, endian)
}
# Worker connections use the same polling read as shell connections.
read_bin.spark_worker_connection <- function(con, what, n, endian = NULL) {
  read_bin_wait(con, what, n, endian)
}
# Livy backends wrap the actual R connection in `con$rc`; read it directly
# without the polling/timeout logic used for shell connections.
read_bin.livy_backend <- function(con, what, n, endian = NULL) {
  read_bin.default(con$rc, what, n, endian)
}
# Deserialize one value: each serialized value is prefixed on the wire with a
# one-character type tag, which drives the typed read.
readObject <- function(con) {
  readTypedObject(con, readType(con))
}
# Deserialize a single value of the given wire type from `con`.
# Type tags (mirroring writeType in the serializer):
#   i=integer, c=character, b=boolean, d=double, r=raw, D=Date, t=timestamp,
#   a=typed array, l=list, e=map, s=struct, f=fast string array, n=NULL,
#   j=jobj reference (looked up by id), J=JSON-encoded payload.
readTypedObject <- function(con, type) {
  switch(type,
    "i" = readInt(con),
    "c" = readString(con),
    "b" = readBoolean(con),
    "d" = readDouble(con),
    "r" = readRaw(con),
    "D" = readDate(con),
    "t" = readTime(con),
    "a" = readArray(con),
    "l" = readList(con),
    "e" = readMap(con),
    "s" = readStruct(con),
    "f" = readFastStringArray(con),
    "n" = NULL,
    "j" = getJobj(con, readString(con)),
    "J" = jsonlite::fromJSON(
      readString(con),
      simplifyDataFrame = FALSE, simplifyMatrix = FALSE
    ),
    stop(paste("Unsupported type for deserialization", type))
  )
}
# Read a length-prefixed UTF-8 string. Embedded NUL bytes (which R strings
# cannot hold) are stripped with a warning.
readString <- function(con) {
  len <- readInt(con)
  out <- ""
  if (len > 0) {
    bytes <- read_bin(con, raw(), len, endian = "big")
    if (is.element("00", bytes)) {
      warning("Input contains embedded nuls, removing.")
      bytes <- bytes[bytes != "00"]
    }
    out <- rawToChar(bytes)
  }
  Encoding(out) <- "UTF-8"
  out
}
# Read a string array transmitted as a single delimited string: one string
# read, then split on the U+0019 separator character.
readFastStringArray <- function(con) {
  combined <- readString(con)
  as.list(strsplit(combined, "\u0019")[[1]])
}
# Read `n` dates one at a time and concatenate them into a Date vector.
readDateArray <- function(con, n = 1) {
  if (n == 0) {
    return(as.Date(NA))
  }
  dates <- lapply(seq_len(n), function(i) readDate(con))
  do.call(c, dates)
}
# Read `n` big-endian 32-bit integers; returns integer(0) for a zero count.
readInt <- function(con, n = 1) {
  if (n == 0) {
    return(integer(0))
  }
  read_bin(con, integer(), n = n, endian = "big")
}
# Read `n` big-endian doubles; returns double(0) for a zero count.
readDouble <- function(con, n = 1) {
  if (n == 0) {
    return(double(0))
  }
  read_bin(con, double(), n = n, endian = "big")
}
# Read `n` booleans; they travel on the wire as 32-bit integers (0/1).
readBoolean <- function(con, n = 1) {
  if (n == 0) {
    return(logical(0))
  }
  as.logical(readInt(con, n = n))
}
# Read the one-byte type tag that precedes every serialized value.
readType <- function(con) {
  tag <- read_bin(con, "raw", n = 1L)
  rawToChar(tag)
}
# Read a date transmitted as an integer day count since the Unix epoch.
# Returns NA for a missing value; the `sparklyr.collect.datechars` option
# switches the result to a character representation.
readDate <- function(con) {
  days_since_epoch <- readInt(con)
  if (is.na(days_since_epoch)) {
    return(as.Date(NA))
  }
  result <- as.Date(days_since_epoch, origin = "1970-01-01", tz = "UTC")
  if (getOption("sparklyr.collect.datechars", FALSE)) {
    as.character(result)
  } else {
    result
  }
}
# Read `n` timestamps, transmitted as fractional seconds since the Unix
# epoch. Returns a POSIXct vector, or character when the
# `sparklyr.collect.datechars` option is set.
readTime <- function(con, n = 1) {
  # Use `n == 0` rather than `identical(n, 0)`: counts produced by readInt()
  # (e.g. when called from readArray()) are integer 0L, which
  # `identical(n, 0)` fails to match. This also mirrors the empty-input
  # guards in readInt/readDouble/readBoolean.
  if (n == 0) {
    as.POSIXct(character(0))
  } else {
    t <- readDouble(con, n)
    r <- as.POSIXct(t, origin = "1970-01-01", tz = "UTC")
    if (getOption("sparklyr.collect.datechars", FALSE)) {
      as.character(r)
    } else {
      r
    }
  }
}
# Read a homogeneous array: a type tag, then a length, then the elements.
# Primitive element types (double/int/boolean/timestamp/date) are read with
# a single vectorized call; other element types fall back to one typed read
# per element.
readArray <- function(con) {
  type <- readType(con)
  len <- readInt(con)
  if (type == "d") {
    return(readDouble(con, n = len))
  } else if (type == "i") {
    return(readInt(con, n = len))
  } else if (type == "b") {
    return(readBoolean(con, n = len))
  } else if (type == "t") {
    return(readTime(con, n = len))
  } else if (type == "D") {
    return(readDateArray(con, n = len))
  }
  if (len > 0) {
    l <- vector("list", len)
    for (i in seq_len(len)) {
      l[[i]] <- readTypedObject(con, type)
    }
    l
  } else {
    list()
  }
}
# Read a list whose elements may have heterogeneous types; each element
# carries its own type tag. Null objects are deserialized as NA.
readList <- function(con) {
  count <- readInt(con)
  if (count <= 0) {
    return(list())
  }
  out <- vector("list", count)
  for (idx in seq_len(count)) {
    value <- readObject(con)
    out[[idx]] <- if (is.null(value)) NA else value
  }
  out
}
# Read a string-keyed map: a count followed by (key, value) pairs where the
# key is a plain string and the value carries its own type tag.
readMap <- function(con) {
  size <- readInt(con)
  pairs <- list()
  if (size > 0) {
    for (idx in seq_len(size)) {
      k <- readString(con)
      pairs[[k]] <- readObject(con)
    }
  }
  pairs
}
# Tag a named list with class "struct" so the SerDe layer can tell a Spark
# StructType value apart from an ordinary named list. The input must be a
# plain, named list.
listToStruct <- function(list) {
  stopifnot(class(list) == "list")
  stopifnot(!is.null(names(list)))
  structure(list, class = "struct")
}
# Read a StructType field from a DataFrame into a named R list carrying
# class "struct": first the field names, then the field values.
readStruct <- function(con) {
  field_names <- readObject(con)
  field_values <- readObject(con)
  names(field_values) <- field_names
  listToStruct(field_values)
}
# Read a raw byte vector: a 4-byte length prefix followed by the bytes.
readRaw <- function(con) {
  size <- readInt(con)
  if (size == 0) {
    return(raw())
  }
  read_bin(con, raw(), as.integer(size), endian = "big")
}
# Two-part hint appended to gateway-related error messages, pointing users
# at console logging for more diagnostics.
sparklyr_gateway_trouble_shooting_msg <- function() {
  hint <- c(
    "\n\n\nTry running `options(sparklyr.log.console = TRUE)` followed by ",
    "`sc <- spark_connect(...)` for more debugging info."
  )
  hint
}
# Poll for a socket connection to the sparklyr gateway until it opens or
# the configured deadline passes. When `isStarting` is TRUE (we just
# launched the gateway) the long "sparklyr.connect.timeout" applies;
# otherwise the short "sparklyr.gateway.timeout" is used to quickly probe
# for an existing gateway. Returns the open connection, or NULL on timeout.
wait_connect_gateway <- function(gatewayAddress, gatewayPort, config, isStarting) {
  waitSeconds <- if (isStarting) {
    spark_config_value(config, "sparklyr.connect.timeout", 60)
  } else {
    spark_config_value(config, "sparklyr.gateway.timeout", 1)
  }
  gateway <- NULL
  commandStart <- Sys.time()
  while (is.null(gateway) && Sys.time() < commandStart + waitSeconds) {
    tryCatch(
      {
        suppressWarnings({
          timeout <- spark_config_value(config, "sparklyr.gateway.interval", 1)
          gateway <- socketConnection(
            host = gatewayAddress,
            port = gatewayPort,
            server = FALSE,
            blocking = TRUE,
            open = "rb",
            timeout = timeout
          )
        })
      },
      error = function(err) {
        # connection refused: gateway not up yet; swallow and retry below
      }
    )
    startWait <- spark_config_value(config, "sparklyr.gateway.wait", 50 / 1000)
    Sys.sleep(startWait)
  }
  gateway
}
# Numeric command codes understood by the sparklyr gateway protocol.
spark_gateway_commands <- function() {
  commands <- list()
  commands[["GetPorts"]] <- 0
  commands[["RegisterInstance"]] <- 1
  commands
}
# Handshake with a connected gateway: send the GetPorts command, the
# session id and our wait budget, then read back the backend session id,
# the redirect gateway port and the backend port. Returns a list with
# those ports, NULL when the gateway stays silent (and we are not the one
# starting it), or errors out when starting.
query_gateway_for_port <- function(gateway, sessionId, config, isStarting) {
  waitSeconds <- if (isStarting) {
    spark_config_value(config, "sparklyr.connect.timeout", 60)
  } else {
    spark_config_value(config, "sparklyr.gateway.timeout", 1)
  }
  writeInt(gateway, spark_gateway_commands()[["GetPorts"]])
  writeInt(gateway, sessionId)
  writeInt(gateway, if (isStarting) waitSeconds else 0)
  backendSessionId <- NULL
  redirectGatewayPort <- NULL
  commandStart <- Sys.time()
  # poll until the session id arrives or the deadline passes; readInt on a
  # non-blocking/empty socket yields a zero-length result
  while (length(backendSessionId) == 0 && commandStart + waitSeconds > Sys.time()) {
    backendSessionId <- readInt(gateway)
    Sys.sleep(0.1)
  }
  redirectGatewayPort <- readInt(gateway)
  backendPort <- readInt(gateway)
  if (length(backendSessionId) == 0 || length(redirectGatewayPort) == 0 || length(backendPort) == 0) {
    if (isStarting) {
      stop(
        "Sparklyr gateway did not respond while retrieving ports information after ",
        waitSeconds,
        " seconds.",
        sparklyr_gateway_trouble_shooting_msg()
      )
    } else {
      return(NULL)
    }
  }
  list(
    gateway = gateway,
    backendPort = backendPort,
    redirectGatewayPort = redirectGatewayPort
  )
}
# Connect to a sparklyr gateway and resolve the backend port for the given
# session. When the gateway answers with a redirect port different from
# the one we dialed, recurse into that port. Returns list(gateway,
# backendPort), or NULL when no gateway answers and we are not the process
# starting one (errors instead when `isStarting` is TRUE).
spark_connect_gateway <- function(
    gatewayAddress,
    gatewayPort,
    sessionId,
    config,
    isStarting = FALSE) {
  # try connecting to existing gateway
  gateway <- wait_connect_gateway(gatewayAddress, gatewayPort, config, isStarting)
  if (is.null(gateway)) {
    if (isStarting) {
      stop(
        "Gateway in ", gatewayAddress, ":", gatewayPort, " did not respond.",
        sparklyr_gateway_trouble_shooting_msg()
      )
    }
    NULL
  }
  else {
    worker_log("is querying ports from backend using port ", gatewayPort)
    gateway_ports_query_attempts <- as.integer(
      spark_config_value(config, "sparklyr.gateway.port.query.attempts", 3L)
    )
    gateway_ports_query_retry_interval_s <- as.integer(
      spark_config_value(config, "sparklyr.gateway.port.query.retry.interval.seconds", 4L)
    )
    # NOTE(review): withCallingHandlers() does not stop error propagation —
    # after the handler returns, the error keeps unwinding out of this
    # function, so it is unclear the retry loop ever re-runs on a failed
    # query; confirm whether tryCatch() was intended here.
    while (gateway_ports_query_attempts > 0) {
      gateway_ports_query_attempts <- gateway_ports_query_attempts - 1
      withCallingHandlers(
        {
          gatewayPortsQuery <- query_gateway_for_port(
            gateway,
            sessionId,
            config,
            isStarting
          )
          break
        },
        error = function(e) {
          isStarting <- FALSE
          if (gateway_ports_query_attempts > 0) {
            Sys.sleep(gateway_ports_query_retry_interval_s)
          }
          NULL
        }
      )
    }
    if (is.null(gatewayPortsQuery) && !isStarting) {
      close(gateway)
      return(NULL)
    }
    redirectGatewayPort <- gatewayPortsQuery$redirectGatewayPort
    backendPort <- gatewayPortsQuery$backendPort
    worker_log("found redirect gateway port ", redirectGatewayPort)
    # port 0 means the gateway does not know this session
    if (redirectGatewayPort == 0) {
      close(gateway)
      if (isStarting) {
        stop("Gateway in ", gatewayAddress, ":", gatewayPort, " does not have the requested session registered")
      }
      NULL
    } else if (redirectGatewayPort != gatewayPort) {
      close(gateway)
      spark_connect_gateway(gatewayAddress, redirectGatewayPort, sessionId, config, isStarting)
    }
    else {
      list(
        gateway = gateway,
        backendPort = backendPort
      )
    }
  }
}
# Drain any unread bytes from the backend socket so the next RPC starts at
# a clean frame boundary after an interrupted invocation.
core_invoke_sync_socket <- function(sc) {
  # non-empty sentinel so the loop body runs at least once
  flush <- c(1)
  while (length(flush) > 0) {
    flush <- readBin(sc$backend, raw(), 1000)
    # while flushing monitored connections we don't want to hang forever
    if (identical(sc$state$use_monitoring, TRUE)) break
  }
}
# Block until the backend connection reports a clean request/response
# state, draining stale bytes between echo probes.
core_invoke_sync <- function(sc) {
  repeat {
    if (core_invoke_synced(sc)) {
      break
    }
    Sys.sleep(1)
    core_invoke_sync_socket(sc)
  }
}
# Cancel all running Spark jobs after an interrupted invocation. Skipped
# when there is no Spark context yet, for monitored connections, and while
# a cancellation is already in flight (which can happen when an
# OutOfMemory error shuts the context down).
core_invoke_cancel_running <- function(sc) {
  if (is.null(spark_context(sc))) {
    return()
  }
  # if something fails while using a monitored connection we don't cancel jobs
  if (identical(sc$state$use_monitoring, TRUE)) {
    return()
  }
  # if something fails while cancelling jobs we don't cancel jobs, this can
  # happen in OutOfMemory errors that shut down the spark context
  if (identical(sc$state$cancelling_all_jobs, TRUE)) {
    return()
  }
  connection_progress_context(sc, function() {
    sc$state$cancelling_all_jobs <- TRUE
    on.exit(sc$state$cancelling_all_jobs <- FALSE)
    invoke(spark_context(sc), "cancelAllJobs")
  })
  # optional hook, present only in some sparklyr builds/front-ends
  if (exists("connection_progress_terminated")) connection_progress_terminated(sc)
}
# Serialize a method invocation (target object id, static flag, jobj-ref
# flag, method name and arguments) into a length-prefixed binary frame and
# write the frame to the backend socket in one shot.
write_bin_args <- function(backend, object, static, method, args, return_jobj_ref = FALSE) {
  payload_con <- rawConnection(raw(), "r+")
  writeString(payload_con, object)
  writeBoolean(payload_con, static)
  writeBoolean(payload_con, return_jobj_ref)
  writeString(payload_con, method)
  writeInt(payload_con, length(args))
  writeArgs(payload_con, args)
  payload <- rawConnectionValue(payload_con)
  close(payload_con)

  frame_con <- rawConnection(raw(0), "r+")
  writeInt(frame_con, length(payload))
  writeBin(payload, frame_con)
  frame <- rawConnectionValue(frame_con)
  close(frame_con)

  writeBin(frame, backend)
}
# Probe whether the backend channel is in a clean request/response state by
# issuing an echo RPC and verifying the payload comes back intact. FALSE
# means stale bytes are still in flight and the socket needs draining.
core_invoke_synced <- function(sc) {
  if (is.null(sc)) {
    stop("The connection is no longer valid.")
  }
  backend <- core_invoke_socket(sc)
  echo_id <- "sparklyr"
  write_bin_args(backend, "Handler", TRUE, "echo", echo_id)
  returnStatus <- readInt(backend)
  if (length(returnStatus) == 0 || returnStatus != 0) {
    FALSE
  }
  else {
    object <- readObject(sc)
    identical(object, echo_id)
  }
}
# Pick the socket for the next invocation: the dedicated monitoring
# connection when monitoring mode is on, otherwise the main backend socket.
core_invoke_socket <- function(sc) {
  use_monitoring <- identical(sc$state$use_monitoring, TRUE)
  if (use_monitoring) sc$monitoring else sc$backend
}
# Label for the socket chosen by core_invoke_socket(); used as the key for
# per-connection status tracking in sc$state$status.
core_invoke_socket_name <- function(sc) {
  if (identical(sc$state$use_monitoring, TRUE)) "monitoring" else "backend"
}
# Ask the backend to drop its references for the given jobj ids; sent with
# noreply = TRUE since no response payload is expected.
core_remove_jobjs <- function(sc, ids) {
  core_invoke_method_impl(sc, static = TRUE, noreply = TRUE, "Handler", "rm", FALSE, as.list(ids))
}
# Public entry point for invoking a backend method; always waits for the
# reply. See core_invoke_method_impl() for the protocol details.
core_invoke_method <- function(sc, static, object, method, return_jobj_ref, ...) {
  core_invoke_method_impl(sc, static, noreply = FALSE, object, method, return_jobj_ref, ...)
}
# Core RPC: invoke `method` on `object` (a jobj or a well-known handler
# name) in the backend JVM. Flushes queued jobj deletions first, keeps the
# per-socket status in sync so interrupted calls can be cancelled and the
# socket re-synced, then writes the request frame and (unless `noreply`)
# reads the status/result. Statement order mirrors the wire protocol and
# must not be rearranged.
core_invoke_method_impl <- function(sc, static, noreply, object, method, return_jobj_ref, ...) {
  # N.B.: the reference to `object` must be retained until after a value or exception is returned to us
  # from the invoked method here (i.e., cannot have `object <- something_else` before that), because any
  # re-assignment could cause the last reference to `object` to be destroyed and the underlying JVM object
  # to be deleted from JVMObjectTracker before the actual invocation of the method could happen.
  lockBinding("object", environment())
  if (is.null(sc)) {
    stop("The connection is no longer valid.")
  }
  args <- list(...)
  # initialize status if needed
  if (is.null(sc$state$status)) {
    sc$state$status <- list()
  }
  # choose connection socket
  backend <- core_invoke_socket(sc)
  connection_name <- core_invoke_socket_name(sc)
  # flush jobj ids queued for deletion by finalizers (see cleanup.jobj);
  # skipped for Handler calls to avoid recursing while removing
  if (!identical(object, "Handler")) {
    toRemoveJobjs <- get_to_remove_jobjs(sc)
    objsToRemove <- ls(toRemoveJobjs)
    if (length(objsToRemove) > 0) {
      core_remove_jobjs(sc, objsToRemove)
      rm(list = objsToRemove, envir = toRemoveJobjs)
    }
  }
  if (!identical(object, "Handler") &&
    spark_config_value(sc$config, c("sparklyr.cancellable", "sparklyr.connection.cancellable"), TRUE)) {
    # if connection still running, sync to valid state
    if (identical(sc$state$status[[connection_name]], "running")) {
      core_invoke_sync(sc)
    }
    # while exiting this function, if interrupted (still running), cancel server job
    on.exit(core_invoke_cancel_running(sc))
    sc$state$status[[connection_name]] <- "running"
  }
  # if the object is a jobj then get it's id
  objId <- ifelse(inherits(object, "spark_jobj"), object$id, object)
  write_bin_args(backend, objId, static, method, args, return_jobj_ref)
  if (identical(object, "Handler") &&
    (identical(method, "terminateBackend") || identical(method, "stopBackend"))) {
    # by the time we read response, backend might be already down.
    return(NULL)
  }
  result_object <- NULL
  if (!noreply) {
    # wait for a return status & result
    returnStatus <- readInt(sc)
    if (length(returnStatus) == 0) {
      # read the spark log
      msg <- core_read_spark_log_error(sc)
      withr::with_options(list(
        warning.length = 8000
      ), {
        stop(
          "Unexpected state in sparklyr backend: ",
          msg,
          call. = FALSE
        )
      })
    }
    if (returnStatus != 0) {
      # get error message from backend and report to R
      msg <- readString(sc)
      withr::with_options(list(
        warning.length = 8000
      ), {
        if (nzchar(msg)) {
          core_handle_known_errors(sc, msg)
          stop(msg, call. = FALSE)
        } else {
          # read the spark log
          msg <- core_read_spark_log_error(sc)
          stop(msg, call. = FALSE)
        }
      })
    }
    result_object <- readObject(sc)
  }
  # success: mark the socket ready and clear the cancellation hook
  sc$state$status[[connection_name]] <- "ready"
  on.exit(NULL)
  attach_connection(result_object, sc)
}
# S3 methods mapping each connection class to the jobj subclass attached
# when materializing JVM object references; all current backends share
# "shell_jobj".
jobj_subclass.shell_backend <- function(con) {
  "shell_jobj"
}
jobj_subclass.spark_connection <- function(con) {
  "shell_jobj"
}
jobj_subclass.spark_worker_connection <- function(con) {
  "shell_jobj"
}
# Translate well-known backend failure messages into friendlier R-side
# diagnostics; unrecognized messages pass through untouched (the caller
# raises them verbatim).
core_handle_known_errors <- function(sc, msg) {
  # Some systems might have an invalid hostname that Spark <= 2.0.1 fails to handle
  # gracefully and triggers unexpected errors such as #532. Under these versions,
  # we proactively test getLocalHost() to warn users of this problem.
  if (grepl("ServiceConfigurationError.*tachyon", msg, ignore.case = TRUE)) {
    warning(
      "Failed to retrieve localhost, please validate that the hostname is correctly mapped. ",
      "Consider running `hostname` and adding that entry to your `/etc/hosts` file."
    )
  }
  else if (grepl("check worker logs for details", msg, ignore.case = TRUE) &&
    spark_master_is_local(sc$master)) {
    # local-mode worker failure: surface the worker log file locations
    abort_shell(
      "sparklyr worker rscript failure, check worker logs for details",
      NULL, NULL, sc$output_file, sc$error_file
    )
  }
}
# Fallback error-message builder: when the backend reported no message,
# scrape the Spark log for all entries sharing the most recent timestamp.
# Any failure while parsing the log keeps the generic message.
core_read_spark_log_error <- function(sc) {
  # if there was no error message reported, then
  # return information from the Spark logs. return
  # all those with most recent timestamp
  msg <- "failed to invoke spark command (unknown reason)"
  try(silent = TRUE, {
    log <- readLines(sc$output_file)
    splat <- strsplit(log, "\\s+", perl = TRUE)
    n <- length(splat)
    # assumes log lines look like "<date> <time> ..."; field 2 is the time
    timestamp <- splat[[n]][[2]]
    regex <- paste("\\b", timestamp, "\\b", sep = "")
    entries <- grep(regex, log, perl = TRUE, value = TRUE)
    pasted <- paste(entries, collapse = "\n")
    msg <- paste("failed to invoke spark command", pasted, sep = "\n")
  })
  msg
}
#' Retrieve a Spark JVM Object Reference
#'
#' This S3 generic is used for accessing the underlying Java Virtual Machine
#' (JVM) Spark objects associated with \R objects. These objects act as
#' references to Spark objects living in the JVM. Methods on these objects
#' can be called with the \code{\link{invoke}} family of functions.
#'
#' @param x An \R object containing, or wrapping, a \code{spark_jobj}.
#' @param ... Optional arguments; currently unused.
#'
#' @seealso \code{\link{invoke}}, for calling methods on Java object references.
#'
#' @exportClass spark_jobj
#' @export
spark_jobj <- function(x, ...) {
  UseMethod("spark_jobj")
}
# Extract the backend-side identifier string from a jobj environment.
spark_jobj_id <- function(x) {
  x$id
}
#' @export
spark_jobj.default <- function(x, ...) {
  offending_classes <- paste(class(x), collapse = " ")
  stop(
    "Unable to retrieve a spark_jobj from object of class ",
    offending_classes,
    call. = FALSE
  )
}
#' @export
# Identity method: a spark_jobj already is the underlying reference.
spark_jobj.spark_jobj <- function(x, ...) {
  x
}
#' @export
# Printing dispatches on the connection type via the print_jobj generic.
print.spark_jobj <- function(x, ...) {
  print_jobj(spark_connection(x), x, ...)
}
#' Generic method for print jobj for a connection type
#'
#' @param sc \code{spark_connection} (used for type dispatch)
#' @param jobj Object to print
#'
#' @keywords internal
#'
#' @export
print_jobj <- function(sc, jobj, ...) {
  UseMethod("print_jobj")
}
# Return (creating on first use) the environment tracking reference counts
# for JVM objects currently alive on this connection. Stored inside
# con$state, which is itself an environment, so the registry persists
# across calls.
get_valid_jobjs <- function(con) {
  registry <- con$state$validJobjs
  if (is.null(registry)) {
    registry <- new.env(parent = emptyenv())
    con$state$validJobjs <- registry
  }
  registry
}
# Return (creating on first use) the environment holding jobj ids queued
# for deferred removal on the backend (see cleanup.jobj).
get_to_remove_jobjs <- function(con) {
  queue <- con$state$toRemoveJobjs
  if (is.null(queue)) {
    queue <- new.env(parent = emptyenv())
    con$state$toRemoveJobjs <- queue
  }
  queue
}
# A jobj is valid when its environment still carries a connection and its
# id is registered in that connection's live-object table.
isValidJobj <- function(jobj) {
  if (!exists("connection", jobj)) {
    return(FALSE)
  }
  exists(jobj$id, get_valid_jobjs(jobj$connection))
}
# Wrap a backend object id in a fresh jobj and bump its reference count in
# the connection's live-object registry.
getJobj <- function(con, objId) {
  newObj <- jobj_create(con, objId)
  registry <- get_valid_jobjs(con)
  current <- get0(objId, registry, ifnotfound = 0)
  registry[[objId]] <- current + 1
  newObj
}
# S3 generic resolving which jobj subclass to attach for a connection type.
jobj_subclass <- function(con) {
  UseMethod("jobj_subclass")
}
# Handler for a java object that exists on the backend.
# Builds the jobj as an environment carrying the backend id and registers
# a finalizer so the JVM-side reference is released on garbage collection.
jobj_create <- function(con, objId) {
  if (!is.character(objId)) {
    stop("object id must be a character")
  }
  # NOTE: We need a new env for a jobj as we can only register
  # finalizers for environments or external references pointers.
  obj <- structure(new.env(parent = emptyenv()), class = c("spark_jobj", jobj_subclass(con)))
  obj$id <- objId
  # Register a finalizer to remove the Java object when this reference
  # is garbage collected in R
  reg.finalizer(obj, cleanup.jobj)
  obj
}
# Best-effort description of a jobj: its JVM class name and toString()
# representation. Both lookups go over the wire, so failures (e.g. a
# closed backend) are swallowed and the affected field is left NULL. Note
# the assignments happen inside tryCatch, so a failure in getName() still
# leaves the getClass() result in `class`.
jobj_info <- function(jobj) {
  if (!inherits(jobj, "spark_jobj")) {
    stop("'jobj_info' called on non-jobj")
  }
  class <- NULL
  repr <- NULL
  tryCatch(
    {
      class <- invoke(jobj, "getClass")
      if (inherits(class, "spark_jobj")) {
        class <- invoke(class, "getName")
      }
    },
    error = function(e) {
    }
  )
  tryCatch(
    {
      repr <- invoke(jobj, "toString")
    },
    error = function(e) {
    }
  )
  list(
    class = class,
    repr = repr
  )
}
# Interactive helper: print a jobj followed by the declared fields and
# methods of its JVM class (via reflection). Returns the jobj invisibly
# usable for chaining; skips reflection when the connection is closed.
jobj_inspect <- function(jobj) {
  print(jobj)
  if (!connection_is_open(spark_connection(jobj))) {
    return(jobj)
  }
  jvm_class <- invoke(jobj, "getClass")
  cat("Fields:\n")
  for (field in invoke(jvm_class, "getDeclaredFields")) {
    print(field)
  }
  cat("Methods:\n")
  for (method in invoke(jvm_class, "getDeclaredMethods")) {
    print(method)
  }
  jobj
}
# Finalizer for jobj environments: decrement the reference count and, once
# it reaches zero, queue the id for deferred removal on the backend (see
# NOTE below on why removal cannot happen inline).
cleanup.jobj <- function(jobj) {
  if (isValidJobj(jobj)) {
    objId <- jobj$id
    validJobjs <- get_valid_jobjs(jobj$connection)
    validJobjs[[objId]] <- validJobjs[[objId]] - 1

    if (validJobjs[[objId]] == 0) {
      rm(list = objId, envir = validJobjs)
      # NOTE: We cannot call removeJObject here as the finalizer may be run
      # in the middle of another RPC. Thus we queue up this object Id to be removed
      # and then run all the removeJObject when the next RPC is called.
      toRemoveJobjs <- get_to_remove_jobjs(jobj$connection)
      toRemoveJobjs[[objId]] <- 1
    }
  }
}
# Drop every tracked JVM object reference for all known connections.
#
# Fixes a bug: the loop iterated with `scon` but the body referenced the
# loop *collection* `scons`, so the per-connection registries were looked
# up on the list of connections instead of on each connection, and the
# individual connections' registries were never cleared.
clear_jobjs <- function() {
  scons <- spark_connection_find()
  for (scon in scons) {
    validJobjs <- get_valid_jobjs(scon)
    valid <- ls(validJobjs)
    rm(list = valid, envir = validJobjs)

    toRemoveJobjs <- get_to_remove_jobjs(scon)
    removeList <- ls(toRemoveJobjs)
    rm(list = removeList, envir = toRemoveJobjs)
  }
}
# Recursively attach `connection` to every spark_jobj contained in `jobj`,
# descending through lists/structs and environments; any other value is
# returned unchanged.
attach_connection <- function(jobj, connection) {
  if (inherits(jobj, "spark_jobj")) {
    jobj$connection <- connection
    jobj
  } else if (is.list(jobj) || inherits(jobj, "struct")) {
    lapply(jobj, attach_connection, connection = connection)
  } else if (is.environment(jobj)) {
    eapply(jobj, attach_connection, connection = connection)
  } else {
    jobj
  }
}
# Utility functions to serialize R objects so they can be read in Java.
# nolint start
# Type mapping from R to Java
#
# NULL -> Void
# integer -> Int
# character -> String
# logical -> Boolean
# double, numeric -> Double
# raw -> Array[Byte]
# Date -> Date
# POSIXct,POSIXlt -> Timestamp
#
# list[T] -> Array[T], where T is one of above mentioned types
# environment -> Map[String, T], where T is a native type
# jobj -> Object, where jobj is an object created in the backend
# nolint end
# Map an R value to the SerDe type label consumed by writeType() /
# writeObject(). Lists are labeled "array" only when every element shares
# one type and no element is NA (Scala arrays cannot hold NAs); otherwise
# they remain "list".
getSerdeType <- function(object) {
  object_type <- class(object)[[1]]
  if (object_type != "list") {
    return(object_type)
  }
  element_types <- unique(sapply(object, getSerdeType))
  if (length(element_types) > 1) {
    return("list")
  }
  if (any(is.na(object))) "list" else "array"
}
# Serialize `object` to `con`, optionally prefixed with its one-byte type
# tag. Scalar NAs of the supported atomic classes are transmitted as NULL,
# the wire representation of a missing value.
writeObject <- function(con, object, writeType = TRUE) {
  type <- class(object)[[1]]
  if (type %in% c("integer", "character", "logical", "double", "numeric", "factor", "Date", "POSIXct")) {
    # Fix: is.na() is vectorized and `if` with a non-scalar condition is an
    # error as of R 4.2 (previously it silently used only the first
    # element); a zero-length value also made the condition error. Only a
    # length-1 NA should map to NULL — vectors and empty values must skip
    # this branch and be serialized as arrays/lists.
    if (length(object) == 1 && is.na(object)) {
      object <- NULL
      type <- "NULL"
    }
  }
  serdeType <- getSerdeType(object)
  if (writeType) {
    writeType(con, serdeType)
  }
  switch(serdeType,
    NULL = writeVoid(con),
    integer = writeInt(con, object),
    character = writeString(con, object),
    logical = writeBoolean(con, object),
    double = writeDouble(con, object),
    numeric = writeDouble(con, object),
    raw = writeRaw(con, object),
    array = writeArray(con, object),
    list = writeList(con, object),
    struct = writeList(con, object),
    spark_jobj = writeJobj(con, object),
    environment = writeEnv(con, object),
    Date = writeDate(con, object),
    POSIXlt = writeTime(con, object),
    POSIXct = writeTime(con, object),
    factor = writeFactor(con, object),
    `data.frame` = writeList(con, object),
    spark_apply_binary_result = writeList(con, object),
    stop("Unsupported type '", serdeType, "' for serialization")
  )
}
# Serialize NULL: the "n" type tag written by writeType() is the entire
# representation, so no further bytes are emitted.
writeVoid <- function(con) {
  # no value for NULL
}
# Serialize a jobj by sending its backend id; the id must still be tracked
# as a live object on this connection.
writeJobj <- function(con, value) {
  if (!isValidJobj(value)) {
    stop("invalid jobj ", value$id)
  }
  writeString(con, value$id)
}
# Write a length-prefixed UTF-8 string. The declared length includes the
# trailing NUL byte that writeBin() appends to character data — hence +1.
writeString <- function(con, value) {
  utfVal <- enc2utf8(as.character(value))
  writeInt(con, as.integer(nchar(utfVal, type = "bytes") + 1))
  writeBin(utfVal, con, endian = "big", useBytes = TRUE)
}
# Write a 4-byte big-endian integer.
writeInt <- function(con, value) {
  int_value <- as.integer(value)
  writeBin(int_value, con, endian = "big")
}
# Write an 8-byte big-endian double; callers are expected to pass doubles
# (see writeTime, which converts with as.double first).
writeDouble <- function(con, value) {
  writeBin(value, con, endian = "big")
}
# Booleans travel as 4-byte integers.
writeBoolean <- function(con, value) {
  # TRUE becomes 1, FALSE becomes 0
  writeInt(con, as.integer(value))
}
# Write a raw vector: 4-byte length prefix followed by the bytes.
writeRaw <- function(con, batch) {
  writeInt(con, length(batch))
  writeBin(batch, con, endian = "big")
}
# Emit the single-byte type tag for an R class name; the mapping must stay
# in sync with readTypedObject() on the receiving side.
writeType <- function(con, class) {
  tags <- c(
    "NULL" = "n",
    integer = "i",
    character = "c",
    logical = "b",
    double = "d",
    numeric = "d",
    raw = "r",
    array = "a",
    list = "l",
    struct = "s",
    spark_jobj = "j",
    environment = "e",
    Date = "D",
    POSIXlt = "t",
    POSIXct = "t",
    factor = "c",
    "data.frame" = "l",
    spark_apply_binary_result = "l"
  )
  if (!class %in% names(tags)) {
    stop("Unsupported type '", class, "' for serialization")
  }
  writeBin(charToRaw(tags[[class]]), con)
}
# Used to pass arrays where all the elements are of the same type: a single
# element type tag, a length, then each element without its own tag.
writeArray <- function(con, arr) {
  # TODO: Empty lists are given type "character" right now.
  # This may not work if the Java side expects array of any other type.
  if (length(arr) == 0) {
    elemType <- class("somestring")
  } else {
    elemType <- getSerdeType(arr[[1]])
  }

  writeType(con, elemType)
  writeInt(con, length(arr))

  if (length(arr) > 0) {
    for (a in arr) {
      # writeType = FALSE: the shared element tag was already written above
      writeObject(con, a, FALSE)
    }
  }
}
# Serialize a heterogeneous collection: a length prefix followed by each
# element carrying its own type tag.
writeList <- function(con, list) {
  writeInt(con, length(list))
  lapply(list, function(element) writeObject(con, element))
  invisible(NULL)
}
# Serialize an environment as the hash map required on the Java side:
# a size prefix, then the keys as a string array, then the values as a
# heterogeneous list (keys in ls() order, i.e. sorted).
writeEnv <- function(con, env) {
  count <- length(env)
  writeInt(con, count)
  if (count > 0) {
    keys <- ls(env)
    writeArray(con, as.list(keys))
    values <- lapply(keys, function(key) env[[key]])
    writeList(con, as.list(values))
  }
}
# Dates are serialized as integer day offsets from the Unix epoch.
writeDate <- function(con, date) {
  writeInt(con, as.integer(date))
}
# Timestamps are serialized as fractional seconds since the Unix epoch.
writeTime <- function(con, time) {
  writeDouble(con, as.double(time))
}
# Factors are serialized by label (the "c"/character tag in writeType).
writeFactor <- function(con, factor) {
  writeString(con, as.character(factor))
}
# Serialize a list of call arguments where each object may have a
# different type: the format is <type tag> <object> per argument (the
# argument count is written separately by write_bin_args).
writeArgs <- function(con, args) {
  if (length(args) > 0) {
    lapply(args, function(arg) writeObject(con, arg))
  }
  invisible(NULL)
}
# Look up a function from `packageName`'s namespace, returning NULL when
# the package or the function is unavailable.
#
# Improvement: availability is now checked with requireNamespace() rather
# than scanning installed.packages(), which reads every installed
# package's metadata and whose own documentation discourages using it for
# existence checks. This is also more robust: a package that is installed
# but fails to load now yields NULL instead of an error from asNamespace().
core_get_package_function <- function(packageName, functionName) {
  if (requireNamespace(packageName, quietly = TRUE) &&
    exists(functionName, envir = asNamespace(packageName))) {
    get(functionName, envir = asNamespace(packageName))
  } else {
    NULL
  }
}
# Pack the worker configuration into one semicolon-delimited string so it
# can travel on the worker command line. Field order must stay in sync
# with worker_config_deserialize().
worker_config_serialize <- function(config) {
  paste(
    if (isTRUE(config$debug)) "TRUE" else "FALSE",
    spark_config_value(config, "sparklyr.worker.gateway.port", "8880"),
    spark_config_value(config, "sparklyr.worker.gateway.address", "localhost"),
    if (isTRUE(config$profile)) "TRUE" else "FALSE",
    if (isTRUE(config$schema)) "TRUE" else "FALSE",
    if (isTRUE(config$arrow)) "TRUE" else "FALSE",
    if (isTRUE(config$fetch_result_as_sdf)) "TRUE" else "FALSE",
    if (isTRUE(config$single_binary_column)) "TRUE" else "FALSE",
    config$spark_version,
    sep = ";"
  )
}
# Inverse of worker_config_serialize(): unpack the semicolon-delimited
# configuration string passed to worker processes.
worker_config_deserialize <- function(raw) {
  fields <- strsplit(raw, ";")[[1]]
  list(
    debug = as.logical(fields[[1]]),
    sparklyr.gateway.port = as.integer(fields[[2]]),
    sparklyr.gateway.address = fields[[3]],
    profile = as.logical(fields[[4]]),
    schema = as.logical(fields[[5]]),
    arrow = as.logical(fields[[6]]),
    fetch_result_as_sdf = as.logical(fields[[7]]),
    single_binary_column = as.logical(fields[[8]]),
    spark_version = fields[[9]]
  )
}
# nocov start
# Obtain the worker-side host context as a jobj; subsequent
# worker_invoke() calls for this task go through this reference.
spark_worker_context <- function(sc) {
  hostContextId <- worker_invoke_method(sc, FALSE, "Handler", "getHostContext")
  worker_log("retrieved worker context id ", hostContextId)

  context <- structure(
    class = c("spark_jobj", "shell_jobj"),
    list(
      id = hostContextId,
      connection = sc
    )
  )

  worker_log("retrieved worker context")

  context
}
# Make user packages visible to the worker R session: either unbundle the
# package bundle shipped via SparkFiles and prepend it to .libPaths(), or
# fall back to the library paths advertised in the `spark.r.libpaths`
# Spark configuration.
spark_worker_init_packages <- function(sc, context) {
  bundlePath <- worker_invoke(context, "getBundlePath")

  if (nchar(bundlePath) > 0) {
    bundleName <- basename(bundlePath)
    worker_log("using bundle name ", bundleName)

    workerRootDir <- worker_invoke_static(sc, "org.apache.spark.SparkFiles", "getRootDirectory")
    sparkBundlePath <- file.path(workerRootDir, bundleName)

    worker_log("using bundle path ", normalizePath(sparkBundlePath))

    if (!file.exists(sparkBundlePath)) {
      stop("failed to find bundle under SparkFiles root directory")
    }

    unbundlePath <- worker_spark_apply_unbundle(
      sparkBundlePath,
      workerRootDir,
      tools::file_path_sans_ext(bundleName)
    )

    .libPaths(unbundlePath)
    worker_log("updated .libPaths with bundle packages")
  }
  else {
    spark_env <- worker_invoke_static(sc, "org.apache.spark.SparkEnv", "get")
    spark_libpaths <- worker_invoke(worker_invoke(spark_env, "conf"), "get", "spark.r.libpaths", NULL)
    if (!is.null(spark_libpaths)) {
      spark_libpaths <- unlist(strsplit(spark_libpaths, split = ","))
      .libPaths(spark_libpaths)
    }
  }
}
# Run the user closure over one data frame of rows. Builds the argument
# list positionally (df, optional user context, one value per group-by
# column, optional barrier info) truncated to the closure's arity, plus an
# optional named partition-index argument. Returns a data frame (raw
# results are wrapped when fetch_result_as_sdf is FALSE) or NULL for empty
# input.
spark_worker_execute_closure <- function(
    sc,
    closure,
    df,
    funcContext,
    grouped_by,
    barrier_map,
    fetch_result_as_sdf,
    partition_index) {
  if (nrow(df) == 0) {
    worker_log("found that source has no rows to be proceesed")
    return(NULL)
  }

  barrier_arg <- NULL
  worker_log("barrier is ", as.character(barrier_map))
  if (length(barrier_map) > 0) {
    worker_log("found barrier execution context")
    barrier_arg <- list(barrier = barrier_map)
  }

  closure_params <- length(formals(closure))
  has_partition_index_param <- (
    !is.null(funcContext$partition_index_param) &&
      nchar(funcContext$partition_index_param) > 0
  )
  # the partition-index parameter is passed by name, not position
  if (has_partition_index_param) closure_params <- closure_params - 1
  closure_args <- c(
    list(df),
    if (!is.null(funcContext$user_context)) list(funcContext$user_context) else NULL,
    lapply(grouped_by, function(group_by_name) df[[group_by_name]][[1]]),
    barrier_arg
  )[0:closure_params]
  if (has_partition_index_param) {
    closure_args[[funcContext$partition_index_param]] <- partition_index
  }

  worker_log("computing closure")
  result <- do.call(closure, closure_args)
  worker_log("computed closure")

  # restore the user's stringsAsFactors option after coercing the result
  as_factors <- getOption("stringsAsFactors")
  on.exit(options(stringsAsFactors = as_factors))
  options(stringsAsFactors = FALSE)

  if (identical(fetch_result_as_sdf, FALSE)) {
    # raw mode: serialize each element and ship a single binary column
    serialize_impl <- spark_worker_get_serializer(sc)
    result <- lapply(result, function(x) serialize_impl(x, NULL))
    class(result) <- c("spark_apply_binary_result", class(result))

    result <- tibble::tibble(spark_apply_binary_result = result)
  }

  if (!"data.frame" %in% class(result)) {
    worker_log("data.frame expected but ", class(result), " found")
    result <- as.data.frame(result)
  }

  result
}
# Convert any factor columns in the closure result to character so they
# serialize predictably; non-factor columns are left untouched.
spark_worker_clean_factors <- function(result) {
  if (any(sapply(result, is.factor))) {
    cleaned <- lapply(result, function(column) {
      if (is.factor(column)) as.character(column) else column
    })
    result <- as.data.frame(cleaned, stringsAsFactors = FALSE)
  }
  result
}
# When the result will be returned as a Spark data frame (Spark >= 2.4.0
# only), list columns cannot be represented directly: encode each element
# as a JSON string and tag the column "list_col_as_json" so the schema
# pass (spark_worker_apply_maybe_schema) reports it as character.
spark_worker_maybe_serialize_list_cols_as_json <- function(config, result) {
  if (identical(config$fetch_result_as_sdf, TRUE) &&
    config$spark_version >= "2.4.0" &&
    any(sapply(result, is.list))) {
    result <- do.call(
      tibble::tibble,
      lapply(
        result,
        function(x) {
          if (is.list(x)) {
            # digits = NA keeps full numeric precision in the JSON output
            x <- sapply(
              x,
              function(e) jsonlite::toJSON(e, auto_unbox = TRUE, digits = NA)
            )
            class(x) <- c(class(x), "list_col_as_json")
          }
          x
        }
      )
    )
  }

  result
}
# When only the schema is requested, replace the result with a one-row
# data frame describing pipe-delimited column names and R types, plus
# which columns were JSON-encoded list columns (reported as character).
spark_worker_apply_maybe_schema <- function(config, result) {
  if (identical(config$schema, TRUE)) {
    worker_log("updating schema")

    col_names <- colnames(result)
    types <- list()
    json_cols <- list()
    for (i in seq_along(result)) {
      if ("list_col_as_json" %in% class(result[[i]])) {
        json_cols <- append(json_cols, col_names[[i]])
        types <- append(types, "character")
      } else {
        types <- append(types, class(result[[i]])[[1]])
      }
    }
    result <- data.frame(
      names = paste(col_names, collapse = "|"),
      types = paste(types, collapse = "|"),
      json_cols = paste(json_cols, collapse = "|"),
      stringsAsFactors = FALSE
    )
  }

  result
}
# Build a Spark StructType from a named list mapping column name to its R
# class name, delegating field construction to the backend's SQL utils;
# all fields are marked nullable (the TRUE in each triple).
spark_worker_build_types <- function(context, columns) {
  names <- names(columns)
  sqlutils <- worker_invoke(context, "getSqlUtils")
  fields <- worker_invoke(
    sqlutils,
    "createStructFields",
    lapply(
      names,
      function(name) {
        list(name, columns[[name]][[1]], TRUE)
      }
    )
  )

  worker_invoke(sqlutils, "createStructType", fields)
}
# Extract the first (index 0) batch from a grouped batch container.
spark_worker_get_group_batch <- function(batch) {
  worker_invoke(
    batch, "get", 0L
  )
}
# When `grouped` is TRUE, prepend one constant column per grouping key
# (taking each key's value from the first row of the source data frame) to
# the closure result; an empty result collapses to NULL so the caller can
# drop the group. Column names are de-duplicated and dots replaced with
# underscores.
spark_worker_add_group_by_column <- function(df, result, grouped, grouped_by) {
  if (!grouped) {
    return(result)
  }
  if (nrow(result) == 0) {
    return(NULL)
  }
  key_columns <- lapply(grouped_by, function(name) df[[name]][[1]])
  names(key_columns) <- grouped_by
  # strip AsIs so cbind treats the result as a plain data frame
  if ("AsIs" %in% class(result)) {
    class(result) <- class(result)[-match("AsIs", class(result))]
  }
  combined <- do.call("cbind", list(key_columns, result))
  names(combined) <- gsub("\\.", "_", make.unique(names(combined)))
  combined
}
# Accessors for the backend Arrow conversion helpers; Arrow-based transfer
# requires Spark 2.3 or newer on the backend side.
get_arrow_converters <- function(context, config) {
  if (config$spark_version < "2.3.0") {
    stop("ArrowConverters is only supported for Spark 2.3 or above.")
  }

  worker_invoke(context, "getArrowConverters")
}
get_arrow_converters_impl <- function(context, config) {
  if (config$spark_version < "2.3.0") {
    stop("ArrowConverters is only supported for Spark 2.3 or above.")
  }

  worker_invoke(context, "getArrowConvertersImpl")
}
# Arrow-based worker loop: stream the partition (or each group) as Arrow
# record batches, run the user closure per batch, and hand the resulting
# batches back to the backend as a row iterator. State across iterations
# (group index, stream reader, output schema) is threaded carefully; do
# not reorder the reads.
spark_worker_apply_arrow <- function(sc, config) {
  worker_log("using arrow serializer")

  context <- spark_worker_context(sc)
  spark_worker_init_packages(sc, context)

  deserialize_impl <- spark_worker_get_deserializer(sc)

  closure <- deserialize_impl(worker_invoke(context, "getClosure"))
  funcContext <- deserialize_impl(worker_invoke(context, "getContext"))
  grouped_by <- worker_invoke(context, "getGroupBy")
  grouped <- !is.null(grouped_by) && length(grouped_by) > 0

  columnNames <- worker_invoke(context, "getColumns")
  schema_input <- worker_invoke(context, "getSchema")
  time_zone <- worker_invoke(context, "getTimeZoneId")
  options_map <- worker_invoke(context, "getOptions")
  barrier_map <- as.list(worker_invoke(context, "getBarrier"))
  partition_index <- worker_invoke(context, "getPartitionIndex")

  if (grouped) {
    # grouped input arrives as an array of pre-batched groups
    record_batch_raw_groups <- worker_invoke(context, "getSourceArray")
    record_batch_raw_groups_idx <- 1
    record_batch_raw <- spark_worker_get_group_batch(record_batch_raw_groups[[record_batch_raw_groups_idx]])
  } else {
    # ungrouped input is converted to Arrow batches on the backend
    row_iterator <- worker_invoke(context, "getIterator")
    arrow_converters_impl <- get_arrow_converters_impl(context, config)
    record_batch_raw <- worker_invoke(
      arrow_converters_impl,
      "toBatchArray",
      row_iterator,
      schema_input,
      time_zone,
      as.integer(options_map[["maxRecordsPerBatch"]])
    )
  }
  reader <- arrow_record_stream_reader(record_batch_raw)
  record_entry <- arrow_read_record_batch(reader)

  all_batches <- list()
  total_rows <- 0

  schema_output <- NULL
  batch_idx <- 0
  while (!is.null(record_entry)) {
    batch_idx <- batch_idx + 1
    worker_log("is processing batch ", batch_idx)

    df <- arrow_as_tibble(record_entry)
    result <- NULL

    if (!is.null(df)) {
      colnames(df) <- columnNames[seq_along(colnames(df))]

      result <- spark_worker_execute_closure(
        sc,
        closure,
        df,
        funcContext,
        grouped_by,
        barrier_map,
        config$fetch_result_as_sdf,
        partition_index
      )

      result <- spark_worker_add_group_by_column(df, result, grouped, grouped_by)

      result <- spark_worker_clean_factors(result)

      result <- spark_worker_maybe_serialize_list_cols_as_json(config, result)

      result <- spark_worker_apply_maybe_schema(config, result)
    }

    if (!is.null(result)) {
      # derive the output schema once, from the first non-empty result
      if (is.null(schema_output)) {
        schema_output <- spark_worker_build_types(context, lapply(result, class))
      }

      raw_batch <- arrow_write_record_batch(result, config$spark_version)

      all_batches[[length(all_batches) + 1]] <- raw_batch
      total_rows <- total_rows + nrow(result)
    }

    record_entry <- arrow_read_record_batch(reader)

    # current group exhausted: advance to the next group's batch stream
    if (grouped && is.null(record_entry) && record_batch_raw_groups_idx < length(record_batch_raw_groups)) {
      record_batch_raw_groups_idx <- record_batch_raw_groups_idx + 1
      record_batch_raw <- spark_worker_get_group_batch(record_batch_raw_groups[[record_batch_raw_groups_idx]])

      reader <- arrow_record_stream_reader(record_batch_raw)
      record_entry <- arrow_read_record_batch(reader)
    }
  }

  if (length(all_batches) > 0) {
    worker_log("updating ", total_rows, " rows using ", length(all_batches), " row batches")

    arrow_converters <- get_arrow_converters(context, config)
    row_iter <- worker_invoke(arrow_converters, "fromPayloadArray", all_batches, schema_output)

    worker_invoke(context, "setResultIter", row_iter)
    worker_log("updated ", total_rows, " rows using ", length(all_batches), " row batches")
  } else {
    worker_log("found no rows in closure result")
  }

  worker_log("finished apply")
}
# The (de)serializer implementations are shipped from the driver as
# serialized R objects. A list-valued serializer wraps a custom
# implementation under $serializer; it is adapted to the standard
# two-argument (x, connection) calling shape.
spark_worker_get_serializer <- function(sc) {
  serializer <- unserialize(worker_invoke(spark_worker_context(sc), "getSerializer"))
  if (is.list(serializer)) {
    function(x, ...) serializer$serializer(x)
  } else {
    serializer
  }
}
spark_worker_get_deserializer <- function(sc) {
  unserialize(worker_invoke(spark_worker_context(sc), "getDeserializer"))
}
# Non-Arrow execution path for spark_apply(): fetch this partition's rows from
# the JVM, rebuild them as an R data frame, run the user closure (once per
# group when grouped), and ship the combined results back to the JVM.
spark_worker_apply <- function(sc, config) {
  context <- spark_worker_context(sc)
  spark_worker_init_packages(sc, context)
  grouped_by <- worker_invoke(context, "getGroupBy")
  grouped <- !is.null(grouped_by) && length(grouped_by) > 0
  if (grouped) worker_log("working over grouped data")
  length <- worker_invoke(context, "getSourceArrayLength")
  worker_log("found ", length, " rows")
  # Grouped data arrives as one entry per group; ungrouped as plain rows.
  groups <- worker_invoke(context, if (grouped) "getSourceArrayGroupedSeq" else "getSourceArraySeq")
  worker_log("retrieved ", length(groups), " rows")
  deserialize_impl <- spark_worker_get_deserializer(sc)
  closureRaw <- worker_invoke(context, "getClosure")
  closure <- deserialize_impl(closureRaw)
  funcContextRaw <- worker_invoke(context, "getContext")
  funcContext <- deserialize_impl(funcContextRaw)
  # An rlang-serialized copy of the closure, when present, replaces the
  # base-serialized one (it preserves more of the closure's environment).
  closureRLangRaw <- worker_invoke(context, "getClosureRLang")
  if (length(closureRLangRaw) > 0) {
    worker_log("found rlang closure")
    closureRLang <- spark_worker_rlang_unserialize()
    if (!is.null(closureRLang)) {
      closure <- closureRLang(closureRLangRaw)
      worker_log("created rlang closure")
    }
  }
  if (identical(config$schema, TRUE)) {
    worker_log("is running to compute schema")
  }
  columnNames <- worker_invoke(context, "getColumns")
  barrier_map <- as.list(worker_invoke(context, "getBarrier"))
  partition_index <- worker_invoke(context, "getPartitionIndex")
  # Wrap ungrouped data so both cases iterate the same way below.
  if (!grouped) groups <- list(list(groups))
  all_results <- NULL
  for (group_entry in groups) {
    # serialized groups are wrapped over single lists
    data <- group_entry[[1]]
    # Rebuild the group's rows as a tibble; fall back to base rbind when
    # dplyr/tibble are unavailable on the worker.
    df <- (
      if (config$single_binary_column) {
        tibble::tibble(encoded = lapply(data, function(x) x[[1]]))
      } else {
        bind_rows <- core_get_package_function("dplyr", "bind_rows")
        as_tibble <- core_get_package_function("tibble", "as_tibble")
        if (!is.null(bind_rows) && !is.null(as_tibble)) {
          do.call(
            bind_rows,
            lapply(
              data, function(x) { as_tibble(x, .name_repair = "universal") }
            )
          )
        } else {
          warning("dplyr::bind_rows or tibble::as_tibble is unavailable, ",
            "falling back to rbind implementation in base R. ",
            "Inputs with list column(s) will not work.")
          do.call(rbind.data.frame, c(data, list(stringsAsFactors = FALSE)))
        }
      })
    if (!config$single_binary_column) {
      # rbind removes Date classes so we re-assign them here
      if (length(data) > 0 && ncol(df) > 0 && nrow(df) > 0) {
        if (any(sapply(data[[1]], function(e) class(e)[[1]]) %in% c("Date", "POSIXct"))) {
          first_row <- data[[1]]
          for (idx in seq_along(first_row)) {
            first_class <- class(first_row[[idx]])[[1]]
            if (identical(first_class, "Date")) {
              df[[idx]] <- as.Date(df[[idx]], origin = "1970-01-01")
            } else if (identical(first_class, "POSIXct")) {
              df[[idx]] <- as.POSIXct(df[[idx]], origin = "1970-01-01")
            }
          }
        }
        # cast column to correct type, for instance, when dealing with NAs.
        for (i in seq_along(df)) {
          target_type <- funcContext$column_types[[i]]
          if (!is.null(target_type) && class(df[[i]]) != target_type) {
            df[[i]] <- do.call(paste("as", target_type, sep = "."), args = list(df[[i]]))
          }
        }
      }
    }
    colnames(df) <- columnNames[seq_along(colnames(df))]
    # Run the user closure over this group's data frame.
    result <- spark_worker_execute_closure(
      sc,
      closure,
      df,
      funcContext,
      grouped_by,
      barrier_map,
      config$fetch_result_as_sdf,
      partition_index
    )
    # Post-process: re-attach group keys, normalize factors, serialize list
    # columns, and coerce to the requested schema before accumulating.
    result <- spark_worker_add_group_by_column(df, result, grouped, grouped_by)
    result <- spark_worker_clean_factors(result)
    result <- spark_worker_maybe_serialize_list_cols_as_json(config, result)
    result <- spark_worker_apply_maybe_schema(config, result)
    all_results <- rbind(all_results, result)
  }
  if (!is.null(all_results) && nrow(all_results) > 0) {
    worker_log("updating ", nrow(all_results), " rows")
    # The JVM expects a list of row-lists rather than a data frame.
    all_data <- lapply(seq_len(nrow(all_results)), function(i) as.list(all_results[i, ]))
    worker_invoke(context, "setResultArraySeq", all_data)
    worker_log("updated ", nrow(all_results), " rows")
  } else {
    worker_log("found no rows in closure result")
  }
  worker_log("finished apply")
}
spark_worker_rlang_unserialize <- function() {
  # Prefer rlang's unserializer; fall back to the rlanglabs fork when absent.
  fn <- core_get_package_function("rlang", "bytes_unserialise")
  if (!is.null(fn)) {
    return(fn)
  }
  core_get_package_function("rlanglabs", "bytes_unserialise")
}
spark_worker_unbundle_path <- function() {
  # Relative directory under which spark_apply() bundles are extracted.
  file.path("sparklyr-bundle")
}
#' Extracts a bundle of dependencies required by \code{spark_apply()}
#'
#' @param bundle_path Path to the bundle created using \code{spark_apply_bundle()}
#' @param base_path Base path to use while extracting bundles
#' @param bundle_name Name of the subdirectory (under the unbundle root in
#'   \code{base_path}) to extract this bundle into
#'
#' @return The path the bundle was (or already had been) extracted into.
#'
#' @keywords internal
#' @export
worker_spark_apply_unbundle <- function(bundle_path, base_path, bundle_name) {
  extractPath <- file.path(base_path, spark_worker_unbundle_path(), bundle_name)
  lockFile <- file.path(extractPath, "sparklyr.lock")
  if (!dir.exists(extractPath)) dir.create(extractPath, recursive = TRUE)
  # First worker to see an empty directory performs the extraction, dropping
  # a lock file so concurrent workers wait below instead of reading a
  # half-extracted bundle.
  # NOTE(review): the empty-dir check and lock creation are not atomic, so two
  # workers could still race between dir() and writeLines() — confirm callers
  # serialize this, or accept the small race window.
  if (length(dir(extractPath)) == 0) {
    worker_log("found that the unbundle path is empty, extracting:", extractPath)
    writeLines("", lockFile)
    system2("tar", c("-xf", bundle_path, "-C", extractPath))
    unlink(lockFile)
  }
  # Wait out any extraction still in progress (our own lock was already
  # removed above, so a surviving lock belongs to another process).
  if (file.exists(lockFile)) {
    worker_log("found that lock file exists, waiting")
    while (file.exists(lockFile)) {
      Sys.sleep(1)
    }
    worker_log("completed lock file wait")
  }
  extractPath
}
# nocov end
# nocov start
# Opens the gateway + backend socket pair that lets this worker process talk
# to the sparklyr backend, returning an object shaped like a regular
# spark_connection so the shared invoke machinery works unchanged.
spark_worker_connect <- function(
    sessionId,
    backendPort = 8880,
    config = list()) {
  gatewayPort <- spark_config_value(config, "sparklyr.worker.gateway.port", backendPort)
  gatewayAddress <- spark_config_value(config, "sparklyr.worker.gateway.address", "localhost")
  # NOTE(review): the caller-supplied config is discarded here, so every
  # spark_config_value() lookup below falls back to its default — confirm
  # this reset is intentional rather than a leftover.
  config <- list()
  worker_log("is connecting to backend using port ", gatewayPort)
  gatewayInfo <- spark_connect_gateway(gatewayAddress,
    gatewayPort,
    sessionId,
    config = config,
    isStarting = TRUE
  )
  worker_log("is connected to backend")
  worker_log("is connecting to backend session")
  tryCatch(
    {
      interval <- spark_config_value(config, "sparklyr.backend.interval", 1)
      backend <- socketConnection(
        host = "localhost",
        port = gatewayInfo$backendPort,
        server = FALSE,
        blocking = interval > 0,
        open = "wb",
        timeout = interval
      )
      class(backend) <- c(class(backend), "shell_backend")
    },
    error = function(err) {
      # Connecting to the session failed: release the gateway socket before
      # propagating the error.
      close(gatewayInfo$gateway)
      stop(
        "Failed to open connection to backend:", err$message
      )
    }
  )
  worker_log("is connected to backend session")
  # Mimic the field layout of a spark_connection / spark_shell_connection.
  sc <- structure(class = c("spark_worker_connection"), list(
    # spark_connection
    master = "",
    method = "shell",
    app_name = NULL,
    config = NULL,
    state = new.env(),
    # spark_shell_connection
    spark_home = NULL,
    backend = backend,
    gateway = gatewayInfo$gateway,
    output_file = NULL
  ))
  worker_log("created connection")
  sc
}
# nocov end
# nocov start
connection_is_open.spark_worker_connection <- function(sc) {
  # A worker connection counts as open only when BOTH of its sockets
  # (backend and gateway) are open; any error while probing them — e.g. a
  # destroyed connection object — is treated as closed.
  if (identical(sc, NULL)) {
    return(FALSE)
  }
  tryCatch(
    isOpen(sc$backend) && isOpen(sc$gateway),
    error = function(e) FALSE
  )
}
# S3 generic: recover the spark connection backing an arbitrary object.
worker_connection <- function(x, ...) {
  UseMethod("worker_connection")
}
# jobjs carry the connection they were created from in $connection.
worker_connection.spark_jobj <- function(x, ...) {
  x$connection
}
# nocov end
# nocov start
# Thin wrapper over core_invoke_method; the FALSE positional flag is
# forwarded as-is (its semantics are defined by core_invoke_method).
worker_invoke_method <- function(sc, static, object, method, ...) {
  core_invoke_method(sc, static, object, method, FALSE, ...)
}
# S3 generic: invoke `method` on a JVM object reference from the worker.
worker_invoke <- function(jobj, method, ...) {
  UseMethod("worker_invoke")
}
worker_invoke.shell_jobj <- function(jobj, method, ...) {
  worker_invoke_method(worker_connection(jobj), FALSE, jobj, method, ...)
}
# Invoke a static method on a JVM class.
worker_invoke_static <- function(sc, class, method, ...) {
  worker_invoke_method(sc, TRUE, class, method, ...)
}
# Construct a new JVM object of `class` via its "<init>" constructor.
worker_invoke_new <- function(sc, class, ...) {
  worker_invoke_method(sc, TRUE, class, "<init>", ...)
}
# nocov end
# nocov start
# Holds logging state shared by all worker_log_* helpers.
worker_log_env <- new.env()

worker_log_session <- function(sessionId) {
  # Record the session id once so every subsequent log line can be tagged.
  worker_log_env$sessionId <- sessionId
}
worker_log_format <- function(message, session, level = "INFO", component = "RScript") {
  # Log line layout: "<yy/mm/dd HH:MM:SS> <level> sparklyr: <component> (<session>) <message>"
  timestamp <- format(Sys.time(), "%y/%m/%d %H:%M:%S")
  paste0(timestamp, " ", level, " sparklyr: ", component, " (", session, ") ", message)
}
worker_log_level <- function(..., level, component = "RScript") {
  # If this copy of worker_log_env has no session id — presumably because the
  # function was captured into a different environment (TODO confirm) — fall
  # back to the worker_log_env registered in the global environment.
  if (is.null(worker_log_env$sessionId)) {
    worker_log_env <- get0("worker_log_env", envir = .GlobalEnv)
    if (is.null(worker_log_env$sessionId)) {
      # No session registered anywhere: logging is disabled, drop the message.
      return()
    }
  }
  # Concatenate all arguments into a single message string.
  args <- list(...)
  message <- paste(args, sep = "", collapse = "")
  formatted <- worker_log_format(message, worker_log_env$sessionId,
    level = level, component = component
  )
  cat(formatted, "\n")
}
# Informational log line.
worker_log <- function(...) {
  worker_log_level(..., level = "INFO")
}
# Non-fatal problem.
worker_log_warning <- function(...) {
  worker_log_level(..., level = "WARN")
}
# Fatal problem (typically logged right before exiting).
worker_log_error <- function(...) {
  worker_log_level(..., level = "ERROR")
}
# nocov end
# nocov start
# Scratch environment for worker-level globals (e.g. the last captured error
# callstack set by the stop() hook installed in spark_worker_hooks()).
.worker_globals <- new.env(parent = emptyenv())

# Entrypoint for the worker process: connects back to the backend, runs the
# user closure over this partition (Arrow or plain path), and exits with a
# nonzero status on failure so the task is recorded as failed.
spark_worker_main <- function(
    sessionId,
    backendPort = 8880,
    configRaw = NULL) {
  spark_worker_hooks()
  tryCatch(
    {
      worker_log_session(sessionId)
      if (is.null(configRaw)) configRaw <- worker_config_serialize(list())
      config <- worker_config_deserialize(configRaw)
      # Optional profiling of the whole apply run.
      if (identical(config$profile, TRUE)) {
        profile_name <- paste("spark-apply-", as.numeric(Sys.time()), ".Rprof", sep = "")
        worker_log("starting new profile in ", file.path(getwd(), profile_name))
        utils::Rprof(profile_name)
      }
      if (config$debug) {
        worker_log("exiting to wait for debugging session to attach")
        # sleep for 1 day to allow long debugging sessions
        Sys.sleep(60 * 60 * 24)
        return()
      }
      worker_log("is starting")
      options(sparklyr.connection.cancellable = FALSE)
      sc <- spark_worker_connect(sessionId, backendPort, config)
      worker_log("is connected")
      # Arrow transport when enabled; otherwise the plain serialization path.
      if (config$arrow) {
        spark_worker_apply_arrow(sc, config)
      }
      else {
        spark_worker_apply(sc, config)
      }
      if (identical(config$profile, TRUE)) {
        # utils::Rprof(NULL)
        worker_log("closing profile")
      }
    },
    error = function(e) {
      worker_log_error("terminated unexpectedly: ", e$message)
      # Report the callstack captured by the stop() hook, when available.
      if (exists(".stopLastError", envir = .worker_globals)) {
        worker_log_error("collected callstack: \n", get(".stopLastError", envir = .worker_globals))
      }
      quit(status = -1)
    }
  )
  worker_log("finished")
}
# Installs process-wide hooks before running the user closure: wraps
# base::stop() so that whenever any code later aborts, a snippet of the
# calling stack is stashed in .worker_globals for the error logger in
# spark_worker_main() to report.
spark_worker_hooks <- function() {
  unlock <- get("unlockBinding")
  lock <- get("lockBinding")
  originalStop <- stop
  # base's bindings are locked, so temporarily unlock "stop" to replace it.
  unlock("stop", as.environment("package:base"))
  assign("stop", function(...) {
    # Capture up to the last 5 frames (deparsed, truncated to 5 lines each).
    frame_names <- list()
    frame_start <- max(1, sys.nframe() - 5)
    for (i in frame_start:sys.nframe()) {
      current_call <- sys.call(i)
      frame_names[[1 + i - frame_start]] <- paste(i, ": ", paste(head(deparse(current_call), 5), collapse = "\n"), sep = "")
    }
    # Most recent frame first.
    assign(".stopLastError", paste(rev(frame_names), collapse = "\n"), envir = .worker_globals)
    originalStop(...)
  }, as.environment("package:base"))
  lock("stop", as.environment("package:base"))
}
# nocov end
do.call(spark_worker_main, as.list(commandArgs(trailingOnly = TRUE)))
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -7.86605889828453e+39, 2.59028521047074e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615835813-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -7.86605889828453e+39, 2.59028521047074e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
# Pull daily climate records (precipitation, max/min temperature, wind) for
# three New England cities from a local "cst" PostgreSQL database and cache
# the result as an .rda file for downstream use.
library(dplyr)
library(RPostgreSQL)

drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, dbname = "cst")

# City -> (latitude, longitude)
locations <- list(boston = c(42.358056, -71.063611),
                  providence = c(41.823611, -71.422222),
                  worcester = c(42.266667, -71.8))

# One data frame per city, ordered by date, with upper-cased column names.
climate_cities <- lapply(locations, function(loc) {
  df <- dbGetQuery(con, statement = paste(
    "SELECT date, prcp, tmax, tmin, wind",
    "FROM location_data(",
    loc[1], ",", loc[2],
    ")",
    "ORDER BY date"))
  names(df) <- toupper(names(df))
  df
})

# Quick sanity check of the first rows of each city's data.
lapply(climate_cities, head)

# Bug fix: the connection was never released; close it now that all queries
# have run so the database does not accumulate idle connections.
dbDisconnect(con)

save(climate_cities, file='../../data/climate_cities.rda')
| /scripts/postgres/climate_cities.R | no_license | HydrosystemsGroup/Weather-Generator | R | false | false | 612 | r | library(dplyr)
library(RPostgreSQL)
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, dbname = "cst")
locations <- list(boston=c(42.358056, -71.063611),
providence=c(41.823611, -71.422222),
worcester=c(42.266667, -71.8))
climate_cities <- lapply(locations, function(loc) {
df <- dbGetQuery(con, statement=paste(
"SELECT date, prcp, tmax, tmin, wind",
"FROM location_data(",
loc[1], ",", loc[2],
")",
"ORDER BY date"))
names(df) <- toupper(names(df))
df
})
lapply(climate_cities, head)
save(climate_cities, file='../../data/climate_cities.rda')
|
# Random-forest price models for the Airbnb dataset.
# Bug fix: randomForest() was called without attaching its package, and
# print(mtry) referenced an undefined object — both would error at run time.
library(randomForest)

airbnb <- read.csv("airbnb.csv")

# Fix the RNG seed so the forests are reproducible.
set.seed(88)

# Forests of increasing size, default mtry.
RF500 <- randomForest(price ~ ., data = airbnb, ntree = 500)
RF1000 <- randomForest(price ~ ., data = airbnb, ntree = 1000)
RF2000 <- randomForest(price ~ ., data = airbnb, ntree = 2000)

# Final model: 1000 trees, 6 candidate predictors considered per split.
actualRF <- randomForest(price ~ ., data = airbnb, ntree = 1000, mtry = 6)
print(actualRF) | /random forest.R | no_license | tsechunhei/Airbnb-Price-Prediction-with-Machine-Learning | R | false | false | 343 | r | airbnb = read.csv("airbnb.csv")
set.seed(88)
RF500 = randomForest (price~., data = airbnb , ntree = 500)
RF1000 = randomForest (price~., data = airbnb , ntree = 1000)
RF2000 = randomForest (price~., data = airbnb , ntree = 2000)
print(mtry)
actualRF = randomForest(price ~. , data = airbnb, ntree = 1000, mtry = 6)
print(actualRF) |
#!/usr/bin/env Rscript
######################
# rCNV Project #
######################
# Copyright (c) 2022-Present Ryan L. Collins and the Talkowski Laboratory
# Distributed under terms of the MIT License (see LICENSE)
# Contact: Ryan L. Collins <rlcollins@g.harvard.edu>
# Utility functions used for locus highlight plots
#' Parse region for locus highlight
#'
#' Split a tabix-style coordinate string ("chrom:start-end") into its
#' chromosome, start, and end for locus highlight plotting.
#'
#' @param region tabix-style string of region to be plotted
#' @param genome.in path to BEDTools-style .genome file
#' @param export.values boolean indicator to export values to .GlobalEnv \[default: TRUE\]
#' @param return.values boolean indicator to return values as vector \[default: FALSE\]
#'
#' @details if necessary, start and end coordinates will be bounded to the global
#' start and end coordinates of that chromosome as per `genome.in`
#'
#' @return vector (if `return.values` is `TRUE`)
#'
#' @export parse.region.for.highlight
#' @export
parse.region.for.highlight <- function(region, genome.in, export.values=TRUE,
                                       return.values=FALSE){
  genome.df <- read.table(genome.in, sep="\t", header=F)

  # Split "chrom:start-end" into chromosome and coordinate halves
  halves <- unlist(strsplit(region, split=":"))
  chrom <- halves[1]
  coords <- as.numeric(unlist(strsplit(halves[2], split="-")))

  # Clamp the requested window to [1, chromosome length]
  chrom.len <- genome.df[which(genome.df[, 1] == chrom), 2]
  start <- max(c(1, coords[1]))
  end <- min(c(coords[2], chrom.len))

  if(export.values){
    assign("chrom", chrom, envir=.GlobalEnv)
    assign("start", start, envir=.GlobalEnv)
    assign("end", end, envir=.GlobalEnv)
  }
  if(return.values){
    return(c(chrom, start, end))
  }
}
#' Load genes from GTF for plotting
#'
#' Extract gene features from a GTF for plotting
#'
#' @param gtf.in path to .gtf
#' @param region coordinates of region to be extracted
#' @param rstudio.local boolean to indicate local Rstudio environment \[default: FALSE\]
#'
#' @details `region` coordinates must be a `tabix`-compatible string
#'
#' @return data.frame
#'
#' @export
load.genes.from.gtf <- function(gtf.in, region, rstudio.local=FALSE){
  # Required for bedr in local Rstudio only:
  if(rstudio.local){
    Sys.setenv(PATH = paste(Sys.getenv("PATH"), "~/anaconda3/envs/py3/bin", sep = ":"))
  }
  # Tabix region of interest
  if(!file.exists(paste(gtf.in, "tbi", sep="."))){
    stop(paste("tabix index not found for input file", gtf.in))
  }
  require(bedr, quietly=T)
  gtf <- bedr::tabix(region, gtf.in, check.chr=FALSE, verbose=FALSE)
  # Reformat entries
  if(!is.null(gtf)){
    # Standard nine GTF columns
    colnames(gtf) <- c("chr", "source", "feature", "start", "end", "score",
                       "strand", "frame", "attribute")
    # Extract the gene symbol from the semicolon-delimited attribute string:
    # the last whitespace-separated token of the gene_name entry, unquoted.
    gtf$gene <- sapply(gtf$attribute, function(atrs.str){
      atrs <- unlist(strsplit(atrs.str, split=";"))
      parts <- unlist(strsplit(atrs[grep("gene_name", atrs)], split=" "))
      gsub("\"", "", parts[length(parts)])
    })
    # Same extraction for the transcript_id entry.
    gtf$transcript <- sapply(gtf$attribute, function(atrs.str){
      atrs <- unlist(strsplit(atrs.str, split=";"))
      parts <- unlist(strsplit(atrs[grep("transcript_id", atrs)], split=" "))
      gsub("\"", "", parts[length(parts)])
    })
    gtf <- gtf[, c("chr", "start", "end", "gene", "strand", "feature", "transcript")]
    # tabix returns coordinates as character; coerce to numeric.
    gtf[, c("start", "end")] <- apply(gtf[, c("start", "end")], 2, as.numeric)
  }else{
    # No features overlap the region: return an empty, correctly-typed frame.
    gtf <- data.frame("chr"=character(), "start"=numeric(), "end"=numeric(),
                      "gene"=character(), "strand"=character(), "feature"=character(),
                      "transcript"=character())
  }
  return(gtf)
}
#' Load sumstats from BED
#'
#' Extract meta-analysis summary statistics from a single BED
#'
#' @param bedpath path to .bed file with summary statistics
#' @param region coordinates of region to be extracted
#' @param keep.intervals boolean to retain start/end coordinates \[default: compute midpoint\]
#' @param rstudio.local boolean to indicate local Rstudio environment \[default: FALSE\]
#'
#' @details `region` coordinates must be a `tabix`-compatible string
#'
#' @return data.frame
#'
#' @export
load.sumstats.for.region <- function(bedpath, region, keep.intervals=FALSE,
                                     rstudio.local=FALSE){
  # Required for bedr in local Rstudio only:
  if(rstudio.local){
    Sys.setenv(PATH = paste(Sys.getenv("PATH"), "~/anaconda3/envs/py3/bin", sep = ":"))
  }
  # Tabix region of interest
  if(!file.exists(paste(bedpath, "tbi", sep="."))){
    stop(paste("tabix index not found for input file", bedpath))
  }
  require(bedr, quietly=T)
  ss <- bedr::tabix(region, bedpath, check.chr=FALSE, verbose=FALSE)
  # Normalize bedr's "stop" column name to "end"
  colnames(ss)[which(colnames(ss) == "stop")] <- "end"
  # Return based on value of keep.intervals
  if(keep.intervals){
    return(ss[, c("chr", "start", "end", "meta_neg_log10_p", "meta_lnOR",
                  "meta_lnOR_lower", "meta_lnOR_upper")])
  }else{
    # Bug fix: "stop" was renamed to "end" above, so ss$stop was always NULL
    # and the midpoint computation failed; use ss$end instead.
    ss$pos <- (ss$start + ss$end)/2
    return(ss[, c("chr", "pos", "meta_neg_log10_p", "meta_lnOR",
                  "meta_lnOR_lower", "meta_lnOR_upper")])
  }
}
#' Load PIPs for a list of genes
#'
#' Extract PIPs from BED for specified genes for locus highlight plotting
#'
#' @param pips.in path to .tsv of PIPs per gene
#' @param genes vector of gene symbols to retain
#' @param hpos HPO(s) of interest \[default: do not filter on HPO\]
#'
#' @return data.frame of deduplicated (gene, PIP, credible_set) rows
#'
#' @export
load.pips.for.genelist <- function(pips.in, genes, hpos=NULL){
  pips <- read.table(pips.in, header=T, sep="\t", comment.char="", check.names=F)
  # Keep only the requested genes and the columns we need
  pips <- pips[which(pips$gene %in% genes),
               c("gene", "PIP_final", "credible_set", "#HPO")]
  colnames(pips) <- c("gene", "PIP", "credible_set", "HPO")
  if(!is.null(hpos)){
    # Bug fix: this previously compared against the undefined symbol `hpo`
    # (the parameter is `hpos`), which always errored; use %in% so one or
    # more HPOs can be supplied, as documented.
    pips <- pips[which(pips$HPO %in% hpos), ]
  }
  # Drop the HPO column and deduplicate
  pips <- pips[, 1:3]
  return(pips[which(!duplicated(pips)), ])
}
#' Load sample sizes for locus highlights
#'
#' Load per-cohort case & control sample sizes for locus highlight plotting
#'
#' @param table.in path to .tsv with sample sizes per HPO
#' @param case.hpos vector of case HPOs to evaluate \[default: c("HP:0000118")\]
#' @param ctrl.hpo control HPO \[default: "HEALTHY_CONTROL"\]
#'
#' @return list with the following two elements:
#' 1. `$case` vector of case sample sizes
#' 2. `$ctrl` vector of control sample sizes
#'
#' @export
get.sample.sizes.for.highlight <- function(table.in, case.hpos=c("HP:0000118"),
                                           ctrl.hpo="HEALTHY_CONTROL"){
  counts <- read.table(table.in, header=T, sep="\t", comment.char="")

  # Restrict to the per-cohort "meta" columns
  meta.cols <- grep("meta", colnames(counts), fixed=T)

  # Case N per cohort = max across all requested case HPOs
  case.rows <- which(counts[, 1] %in% case.hpos)
  n.case <- apply(counts[case.rows, meta.cols], 2, max, na.rm=T)

  # Control N per cohort
  n.ctrl <- counts[which(counts[, 1] == ctrl.hpo), meta.cols]

  list("case"=as.numeric(as.vector(n.case)),
       "ctrl"=as.numeric(as.vector(n.ctrl)))
}
#' Load quantitative feature
#'
#' Load a quantitative feature track from a BED file for locus highlights
#'
#' @param bedpath path to .bed file with feature information
#' @param region coordinates of region to be extracted
#' @param keep.col column number to use as feature values \[default: 4\]
#' @param rstudio.local boolean to indicate local Rstudio environment \[default: FALSE\]
#'
#' @details `region` coordinates must be a `tabix`-compatible string
#'
#' @return data.frame
#'
#' @export
load.feature.bed.for.highlight <- function(bedpath, region, keep.col=4,
                                           rstudio.local=FALSE){
  # Required for bedr in local Rstudio only:
  if(rstudio.local){
    Sys.setenv(PATH = paste(Sys.getenv("PATH"), "~/anaconda3/envs/py3/bin", sep = ":"))
  }
  # Tabix region of interest
  if(!file.exists(paste(bedpath, "tbi", sep="."))){
    stop(paste("tabix index not found for input file", bedpath))
  }
  require(bedr, quietly=T)
  bed <- bedr::tabix(region, bedpath, check.chr=FALSE, verbose=FALSE)
  if(!is.null(bed)){
    # Keep the three coordinate columns plus the requested feature column.
    # NOTE(review): the value column is coerced to numeric — confirm this is
    # safe for tracks whose values are not numeric.
    bed <- bed[, c(1:3, keep.col)]
    colnames(bed) <- c("chr", "start", "end", "value")
    bed$value <- as.numeric(bed$value)
  }else{
    # No intervals in region: return an empty, correctly-typed frame.
    bed <- data.frame("chr"=character(), "start"=numeric(),
                      "end"=numeric(), value=numeric())
  }
  return(bed)
}
#' Load ChromHMM color code
#'
#' Read the ChromHMM manifest and translate each state's "R,G,B" color code
#' into a hex color string
#'
#' @param chromhmm.manifest.in path to ChromHMM manifest .tsv
#'
#' @return named vector of colors per ChromHMM state
#'
#' @export
load.chromhmm.colors <- function(chromhmm.manifest.in){
  manifest <- read.table(chromhmm.manifest.in, sep="\t", comment.char="", header=T)

  # Convert each "R,G,B" string into a hex color
  state.colors <- sapply(manifest$COLOR.CODE, function(rgb.str){
    rgb.vals <- as.numeric(unlist(strsplit(rgb.str, split=",")))
    rgb(rgb.vals[1], rgb.vals[2], rgb.vals[3], maxColorValue=255)
  })

  # Name each color by its state (first manifest column)
  names(state.colors) <- manifest[, 1]
  state.colors
}
#' Load ChromHMM tracks for locus highlight
#'
#' Load a set of ChromHMM tracks from an input .tsv and apply color scheme
#'
#' @param chromhmm.tracks.in .tsv of ChromHMM tracks to be loaded
#' @param chromhmm.manifest.in path to ChromHMM manifest .tsv
#' @param region coordinates of region to be extracted
#'
#' @return list of data.frames (one per track), each with a `color` column
#'
#' @seealso [load.chromhmm.colors]
#'
#' @export
load.chromhmm.tracks <- function(chromhmm.tracks.in, chromhmm.manifest.in, region){
  # First column of the tracks .tsv lists one BED path per ChromHMM track
  tlist <- read.table(chromhmm.tracks.in)[, 1]
  chmm.colors <- load.chromhmm.colors(chromhmm.manifest.in)
  lapply(tlist, function(tpath){
    # Bug fix: the helper is named load.feature.bed.for.highlight — there is
    # no load.feature.bed in this file, so this call previously errored.
    track <- load.feature.bed.for.highlight(tpath, region)
    track$color <- chmm.colors[track$value]
    return(track)
  })
}
#' Load CNVs for a region of interest
#'
#' Load CNVs from a single BED for a single region, and split by case/control
#'
#' @param bedpaths paths to one or more .bed files with CNVs
#' @param region coordinates of region to be extracted
#' @param cnv filter to this CNV type \[default: do not filter by CNV type\]
#' @param case.hpos vector of case HPOs to evaluate \[default: c("HP:0000118")\]
#' @param ctrl.hpo control HPO \[default: "HEALTHY_CONTROL"\]
#' @param rstudio.local boolean to indicate local Rstudio environment \[default: FALSE\]
#'
#' @details `region` coordinates must be a `tabix`-compatible string
#' .bed files are provided as a vector to `bedpaths`, the contents of these files
#' will be concatenated after loading.
#'
#' @return list with the following two elements:
#' 1. `$case` data.frame of case CNVs
#' 2. `$ctrl` data.frame of control CNVs
#'
#' @export
load.cnvs.from.region <- function(bedpaths, region, cnv=NULL,
                                  case.hpos=c("HP:0000118"),
                                  ctrl.hpo="HEALTHY_CONTROL",
                                  rstudio.local=FALSE){
  # Required for bedr in local Rstudio only:
  if(rstudio.local){
    Sys.setenv(PATH = paste(Sys.getenv("PATH"), "~/anaconda3/envs/py3/bin", sep = ":"))
  }
  # Tabix region of interest
  require(bedr, quietly=T)
  # Concatenate hits from all provided BEDs into one data.frame
  cnvs <- as.data.frame(do.call("rbind", lapply(bedpaths, function(bedpath){
    if(!file.exists(paste(bedpath, "tbi", sep="."))){
      stop(paste("tabix index not found for input file", bedpath))
    }
    bedr::tabix(region, bedpath, check.chr=FALSE, verbose=FALSE)
  })))
  # Sort & filter CNVs
  empty.df <- data.frame("chr"=character(), "start"=numeric(),
                         "end"=numeric(), "cnv_id"=character(),
                         "cnv"=character(), "pheno"=character())
  if(nrow(cnvs) > 0){
    # Ensure consistent column names
    colnames(cnvs) <- c("chr", "start", "end", "cnv_id", "cnv", "pheno")
    cnvs <- cnvs[with(cnvs, order(start, end)), ]
    # Optionally restrict to a single CNV type (e.g. "DEL" or "DUP")
    if(!is.null(cnv)){
      cnvs <- cnvs[which(cnvs$cnv==cnv), ]
    }
    if(nrow(cnvs) > 0){
      # Case CNVs: any requested case HPO appears in the semicolon-delimited
      # phenotype string; control CNVs: phenotype mentions the control HPO.
      case.cnv.idxs <- which(sapply(cnvs$pheno, function(pstr){
        any(case.hpos %in% unlist(strsplit(pstr, split=";", fixed=T)))
      }))
      case.cnvs <- cnvs[case.cnv.idxs, ]
      ctrl.cnvs <- cnvs[grep(ctrl.hpo, cnvs$pheno, fixed=T), ]
    }else{
      case.cnvs <- empty.df
      ctrl.cnvs <- empty.df
    }
  }else{
    case.cnvs <- empty.df
    ctrl.cnvs <- empty.df
  }
  return(list("case"=case.cnvs, "ctrl"=ctrl.cnvs))
}
#' Load multiple CNV BEDs
#'
#' Load all CNVs for a specific region from a list of BEDs
#'
#' @param cnvlist .tsv of CNV .bed files to load
#' @param region coordinates of region to be extracted
#' @param cnv filter to this CNV type \[default: do not filter by CNV type\]
#' @param case.hpo case HPO(s) to evaluate \[default: "HP:0000118"\]
#' @param ctrl.hpo control HPO \[default: "HEALTHY_CONTROL"\]
#' @param rstudio.local boolean to indicate local Rstudio environment \[default: FALSE\]
#'
#' @details `region` coordinates must be a `tabix`-compatible string
#'
#' @return list of outputs from [load.cnvs.from.region()] for each row in `cnvlist`
#'
#' @seealso [load.cnvs.from.region]
#'
#' @export
load.cnvs.from.region.multi <- function(cnvlist, region, cnv=NULL,
                                        case.hpo="HP:0000118",
                                        ctrl.hpo="HEALTHY_CONTROL",
                                        rstudio.local=FALSE){
  # cnvlist should be a tsv of (cohort, path) pairs
  cnv.list <- read.table(cnvlist, header=F, sep="\t")
  # One load.cnvs.from.region() result per cohort, named by cohort.
  # NOTE(review): this parameter is named case.hpo here but case.hpos in
  # load.cnvs.from.region(); it is forwarded positionally so behavior is
  # correct, but the naming is inconsistent.
  cnvs <- lapply(1:nrow(cnv.list), function(i){
    load.cnvs.from.region(cnv.list[i, 2], region, cnv, case.hpo, ctrl.hpo, rstudio.local)
  })
  names(cnvs) <- cnv.list[, 1]
  return(cnvs)
}
#' Format CNVs for locus highlight
#'
#' Transform CNV coordinates to plotting values (with colors) for locus highlights
#'
#' @param cnvs data.frame of CNVs to be plotted
#' @param start left-most plotting coordinate
#' @param end right-most plotting coordinate
#' @param dx plotting resolution along X axis, specified as total number of steps \[default: 100\]
#' @param cnv.height relative height for each CNV \[default: 1\]
#' @param cnv.buffer relative buffer between adjacent CNVs \[default: 0\]
#' @param bevel.switch.pct fraction of start/end of each CNV to be beveled \[default: 0.025\]
#' @param col CNV color \[default: blueblack\]
#' @param highlight.hpo highlight CNVs from this HPO in a different color
#' \[default: plot all CNVs in the same color\]
#' @param highlight.color color to be used for `highlight.hpo`
#' \[default: plot all CNVs in the same color\]
#'
#' @return list ofwith the following two elements:
#' `$cnvs`: plotting values for each individual CNV
#' `$counts`: total number of CNVs overlapping each bin on the X axis
#'
#' @seealso [load.cnvs.from.region]
#'
#' @export
# Converts raw CNV intervals into stacked-polygon plotting coordinates plus a
# per-bin coverage count, for locus highlight "pileup" panels.
# NOTE(review): the default col=blueblack refers to a constant defined
# elsewhere in the package — confirm it is in scope for all callers.
pileup.cnvs.for.highlight <- function(cnvs, start=NULL, end=NULL, dx=100,
                                      cnv.height=1, cnv.buffer=0,
                                      bevel.switch.pct=0.025, col=blueblack,
                                      highlight.hpo=NA, highlight.col=NULL){
  # Set range of values to evaluate
  if(is.null(start)){
    start <- min(cnvs$start)
  }
  if(is.null(end)){
    end <- max(cnvs$end)
  }
  x <- seq(start, end, length.out=dx+1)
  # Set other scaling parameters
  cnv.y.buf <- cnv.height * cnv.buffer
  # Build empty dataframe of CNV pileup
  counts <- data.frame("pos"=x, "count"=0, "idx"=1:length(x))
  # Create plotting values for each CNV
  # Note: must use for loop to increment counts after each CNV is added
  cnv.plot.values <- list()
  if(nrow(cnvs) > 0){
    for(i in 1:nrow(cnvs)){
      # Get CNV info and increment counts
      cnv.id <- cnvs$cnv_id[i]
      cnv.start <- cnvs$start[i]
      cnv.end <- cnvs$end[i]
      cnv.x.idxs <- which(x >= cnv.start & x <= cnv.end)
      counts$count[cnv.x.idxs] <- counts$count[cnv.x.idxs] + cnv.height
      cnv.hpos <- unlist(strsplit(cnvs$pheno[i], split=";", fixed=T))
      # Create plotting vectors for each CNV: top edge left-to-right, then
      # bottom edge right-to-left (closed polygon)
      cnv.x <- c(x[cnv.x.idxs], rev(x[cnv.x.idxs]))
      cnv.y <- c(counts$count[cnv.x.idxs] - cnv.y.buf,
                 rev(counts$count[cnv.x.idxs] - cnv.height + cnv.y.buf))
      # Bevel edges by single dx
      # Bevel left edge according to the CNV that came before
      max.change.dist <- ceiling(bevel.switch.pct * dx)
      if(length(cnv.x.idxs) > 0){
        # Distance from this CNV's left edge to the nearest bin where the
        # previous pileup already reached this CNV's height; a huge sentinel
        # (10e10) means "no nearby step".
        if(i == 1 | cnv.x.idxs[1] == 1){
          dist.to.nearest.change <- 10e10
        }else{
          same.height.idxs <- which(prev.counts$count == max(counts$count[cnv.x.idxs]))
          if(length(same.height.idxs) > 0){
            dist.to.nearest.change <- min(cnv.x.idxs) - max(same.height.idxs)
          }else{
            dist.to.nearest.change <- 10e10
          }
        }
        if(dist.to.nearest.change > max.change.dist){
          cnv.y[1] <- counts$count[cnv.x.idxs[1]] - cnv.height + cnv.y.buf
        }else{
          cnv.y[length(cnv.y)] <- counts$count[cnv.x.idxs[1]] - cnv.y.buf
        }
      }
      # Always bevel right edge sloping outward
      cnv.y[length(cnv.x.idxs)] <- counts$count[cnv.x.idxs[length(cnv.x.idxs)]] - cnv.height + cnv.y.buf
      # Assign color: CNVs carrying the highlight HPO get highlight.col
      if(!is.na(highlight.hpo)){
        if(highlight.hpo %in% cnv.hpos){
          cnv.color <- highlight.col
        }else{
          cnv.color <- col
        }
      }else{
        cnv.color <- col
      }
      # Add CNV plotting values to output list
      cnv.plot.values[[cnv.id]] <- list("x"=cnv.x, "y"=cnv.y, "color"=cnv.color)
      # Save previous CNV's x indexes and counts for comparisons
      prev.x.idxs <- cnv.x.idxs
      prev.counts <- counts
    }
  }
  # Increment final counts by cnv.y.buf (for plotting)
  # NOTE(review): this writes a NEW "counts" column rather than updating
  # "count" — looks like a typo; confirm against callers before changing.
  counts$counts <- counts$count + cnv.y.buf
  return(list("cnvs"=cnv.plot.values, "counts"=counts))
}
| /source/rCNV2/R/highlight_utils.R | permissive | talkowski-lab/rCNV2 | R | false | false | 17,546 | r | #!/usr/bin/env Rscript
######################
# rCNV Project #
######################
# Copyright (c) 2022-Present Ryan L. Collins and the Talkowski Laboratory
# Distributed under terms of the MIT License (see LICENSE)
# Contact: Ryan L. Collins <rlcollins@g.harvard.edu>
# Utility functions used for locus highlight plots
#' Parse region for locus highlight
#'
#' Parse a tabix-style coordinate string for locus highlight
#'
#' @param region tabix-style string of region to be plotted
#' @param genome.in path to BEDTools-style .genome file
#' @param export.values boolean indicator to export values to .GlobalEnv \[default: TRUE\]
#' @param return.values boolean indicator to return values as vector \[default: FALSE\]
#'
#' @details if necessary, start and end coordinates will be bounded to the global
#' start and end coordinates of that chromosome as per `genome.in`
#'
#' @return vector (if `return.values` is `TRUE`)
#'
#' @export parse.region.for.highlight
#' @export
parse.region.for.highlight <- function(region, genome.in, export.values=TRUE,
                                       return.values=FALSE){
  # Look up chromosome lengths so the region can be clipped to valid bounds
  chrom.sizes <- read.table(genome.in, sep="\t", header=F)
  region.fields <- unlist(strsplit(region, split=":"))
  chrom <- region.fields[1]
  coords <- as.numeric(unlist(strsplit(region.fields[2], split="-")))
  chrom.end <- chrom.sizes[which(chrom.sizes[, 1] == chrom), 2]
  # Clip start to >= 1 and end to the chromosome length from the .genome file
  start <- max(c(1, coords[1]))
  end <- min(c(coords[2], chrom.end))
  if(export.values){
    assign("chrom", chrom, envir=.GlobalEnv)
    assign("start", start, envir=.GlobalEnv)
    assign("end", end, envir=.GlobalEnv)
  }
  if(return.values){
    return(c(chrom, start, end))
  }
}
#' Load genes from GTF for plotting
#'
#' Extract gene features from a GTF for plotting
#'
#' @param gtf.in path to .gtf
#' @param region coordinates of region to be extracted
#' @param rstudio.local boolean to indicate local Rstudio environment \[default: FALSE\]
#'
#' @details `region` coordinates must be a `tabix`-compatbile string
#'
#' @return data.frame
#'
#' @export
load.genes.from.gtf <- function(gtf.in, region, rstudio.local=FALSE){
  # Local Rstudio sessions need the conda bin dir on PATH for bedr:
  if(rstudio.local){
    Sys.setenv(PATH = paste(Sys.getenv("PATH"), "~/anaconda3/envs/py3/bin", sep = ":"))
  }
  # A tabix index must sit alongside the GTF for region queries
  if(!file.exists(paste(gtf.in, "tbi", sep="."))){
    stop(paste("tabix index not found for input file", gtf.in))
  }
  require(bedr, quietly=T)
  hits <- bedr::tabix(region, gtf.in, check.chr=FALSE, verbose=FALSE)
  if(is.null(hits)){
    # No features in region: return an empty, correctly-typed data.frame
    return(data.frame("chr"=character(), "start"=numeric(), "end"=numeric(),
                      "gene"=character(), "strand"=character(),
                      "feature"=character(), "transcript"=character()))
  }
  colnames(hits) <- c("chr", "source", "feature", "start", "end", "score",
                      "strand", "frame", "attribute")
  # Pull the (quoted) value of one key out of a GTF attribute string
  extract.attribute <- function(attr.str, key){
    fields <- unlist(strsplit(attr.str, split=";"))
    tokens <- unlist(strsplit(fields[grep(key, fields)], split=" "))
    gsub("\"", "", tokens[length(tokens)])
  }
  hits$gene <- sapply(hits$attribute, extract.attribute, key="gene_name")
  hits$transcript <- sapply(hits$attribute, extract.attribute, key="transcript_id")
  hits <- hits[, c("chr", "start", "end", "gene", "strand", "feature", "transcript")]
  hits[, c("start", "end")] <- apply(hits[, c("start", "end")], 2, as.numeric)
  return(hits)
}
#' Load sumstats from BED
#'
#' Extract meta-analysis summary statistics from a single BED
#'
#' @param bedpath path to .bed file with summary statistics
#' @param region coordinates of region to be extracted
#' @param keep.intervals boolean to retain start/end coordinates \[default: compute midpoint\]
#' @param rstudio.local boolean to indicate local Rstudio environment \[default: FALSE\]
#'
#' @details `region` coordinates must be a `tabix`-compatbile string
#'
#' @return data.frame
#'
#' @export
load.sumstats.for.region <- function(bedpath, region, keep.intervals=FALSE,
                                     rstudio.local=FALSE){
  # Required for bedr in local Rstudio only:
  if(rstudio.local){
    Sys.setenv(PATH = paste(Sys.getenv("PATH"), "~/anaconda3/envs/py3/bin", sep = ":"))
  }
  # Tabix region of interest; requires an index next to the BED file
  if(!file.exists(paste(bedpath, "tbi", sep="."))){
    stop(paste("tabix index not found for input file", bedpath))
  }
  require(bedr, quietly=T)
  ss <- bedr::tabix(region, bedpath, check.chr=FALSE, verbose=FALSE)
  colnames(ss)[which(colnames(ss) == "stop")] <- "end"
  # Return based on value of keep.intervals
  if(keep.intervals){
    return(ss[, c("chr", "start", "end", "meta_neg_log10_p", "meta_lnOR",
                  "meta_lnOR_lower", "meta_lnOR_upper")])
  }else{
    # Add interval midpoint unless keep.intervals is TRUE.
    # Bugfix: the "stop" column was renamed to "end" above, so the old
    # reference to ss$stop returned NULL and broke the midpoint calculation.
    ss$pos <- (ss$start + ss$end)/2
    return(ss[, c("chr", "pos", "meta_neg_log10_p", "meta_lnOR",
                  "meta_lnOR_lower", "meta_lnOR_upper")])
  }
}
#' Load PIPs for a list of genes
#'
#' Extract PIPs from BED for specified genes for locus highlight plotting
#'
#' @param pips.in path to .tsv of PIPs per gene
#' @param genes vector of gene symbols to retain
#' @param hpos HPO(s) of interest \[default: do not filter on HPO\]
#'
#' @return data.frame
#'
#' @export
load.pips.for.genelist <- function(pips.in, genes, hpos=NULL){
  # check.names=F preserves the literal "#HPO" column header; comment.char=""
  # keeps read.table from treating "#" as a comment marker
  pips <- read.table(pips.in, header=T, sep="\t", comment.char="", check.names=F)
  pips <- pips[which(pips$gene %in% genes),
               c("gene", "PIP_final", "credible_set", "#HPO")]
  colnames(pips) <- c("gene", "PIP", "credible_set", "HPO")
  if(!is.null(hpos)){
    # Bugfix: previously compared against the undefined name `hpo`; use %in%
    # so a vector of HPOs can be supplied, matching the argument name
    pips <- pips[which(pips$HPO %in% hpos), ]
  }
  pips <- pips[, 1:3]
  # Drop duplicate (gene, PIP, credible_set) rows arising from multiple HPOs
  return(pips[which(!duplicated(pips)), ])
}
#' Load sample sizes for locus highlights
#'
#' Load sample sizes for case/control contrast for locus highlight plotting
#'
#' @param table.in path to .tsv with sample sizes per HPO
#' @param case.hpos vector of case HPOs to evaluate \[default: c("HP:0000118")\]
#' @param ctrl.hpo control HPO \[default: "HEALTHY_CONTROL"\]
#'
#' @return list with the following two elements:
#' 1. `$case` vector of case sample sizes
#' 2. `$ctrl` vector of control sample sizes
#'
#' @export
get.sample.sizes.for.highlight <- function(table.in, case.hpos=c("HP:0000118"),
                                           ctrl.hpo="HEALTHY_CONTROL"){
  # First column of the table holds HPO codes; "meta" columns hold per-cohort N
  counts <- read.table(table.in, header=T, sep="\t", comment.char="")
  meta.cols <- grep("meta", colnames(counts), fixed=T)
  case.rows <- which(counts[, 1] %in% case.hpos)
  ctrl.rows <- which(counts[, 1] == ctrl.hpo)
  # Case N per cohort = max over the requested case HPOs
  case.n <- apply(counts[case.rows, meta.cols], 2, max, na.rm=T)
  ctrl.n <- counts[ctrl.rows, meta.cols]
  return(list("case"=as.numeric(as.vector(case.n)),
              "ctrl"=as.numeric(as.vector(ctrl.n))))
}
#' Load quantitative feature
#'
#' Load a quantitative feature track from a BED file for locus highlights
#'
#' @param bedpath path to .bed file with feature information
#' @param region coordinates of region to be extracted
#' @param keep.col column number to use as feature values \[default: 4\]
#' @param rstudio.local boolean to indicate local Rstudio environment \[default: FALSE\]
#'
#' @details `region` coordinates must be a `tabix`-compatbile string
#'
#' @return data.frame
#'
#' @export
load.feature.bed.for.highlight <- function(bedpath, region, keep.col=4,
                                           rstudio.local=FALSE){
  # Local Rstudio sessions need the conda bin dir on PATH for bedr:
  if(rstudio.local){
    Sys.setenv(PATH = paste(Sys.getenv("PATH"), "~/anaconda3/envs/py3/bin", sep = ":"))
  }
  # A tabix index must exist alongside the BED file
  if(!file.exists(paste(bedpath, "tbi", sep="."))){
    stop(paste("tabix index not found for input file", bedpath))
  }
  require(bedr, quietly=T)
  hits <- bedr::tabix(region, bedpath, check.chr=FALSE, verbose=FALSE)
  if(is.null(hits)){
    # No overlapping features: return an empty, correctly-typed data.frame
    return(data.frame("chr"=character(), "start"=numeric(),
                      "end"=numeric(), value=numeric()))
  }
  # Keep coordinates plus the requested value column, coerced to numeric
  hits <- hits[, c(1:3, keep.col)]
  colnames(hits) <- c("chr", "start", "end", "value")
  hits$value <- as.numeric(hits$value)
  return(hits)
}
#' Load ChromHMM color code
#'
#' Load ChromHMM state color code as HEX from manifest .tsv
#'
#' @param chromhmm.manifest.in path to ChromHMM manifest .tsv
#'
#' @return named vector of colors per ChromHMM state
#'
#' @export
load.chromhmm.colors <- function(chromhmm.manifest.in){
  manifest <- read.table(chromhmm.manifest.in, sep="\t", comment.char="", header=T)
  # Convert each "R,G,B" triple in COLOR.CODE to a hex color string
  hex <- vapply(manifest$COLOR.CODE, function(code.str){
    rgb.vals <- as.numeric(unlist(strsplit(code.str, split=",")))
    rgb(rgb.vals[1], rgb.vals[2], rgb.vals[3], maxColorValue=255)
  }, character(1))
  # Name colors by the state labels in the manifest's first column
  names(hex) <- manifest[, 1]
  return(hex)
}
#' Load ChromHMM tracks for locus highlight
#'
#' Load a set of ChromHMM tracks from an input .tsv and apply color scheme
#'
#' @param chromhmm.tracks.in .tsv of ChromHMM tracks to be loaded
#' @param chromhmm.manifest.in path to ChromHMM manifest .tsv
#' @param region coordinates of region to be extracted
#'
#' @return data.frame
#'
#' @seealso [load.chromhmm.colors]
#'
#' @export
load.chromhmm.tracks <- function(chromhmm.tracks.in, chromhmm.manifest.in, region){
  track.paths <- read.table(chromhmm.tracks.in)[, 1]
  state.colors <- load.chromhmm.colors(chromhmm.manifest.in)
  # Load each track for the region and attach the per-state hex color
  lapply(track.paths, function(track.path){
    # NOTE(review): load.feature.bed is not defined in this file (the local
    # helper is load.feature.bed.for.highlight) -- confirm the intended
    # function exists elsewhere in the package
    track <- load.feature.bed(track.path, region)
    track$color <- state.colors[track$value]
    return(track)
  })
}
#' Load CNVs for a region of interest
#'
#' Load CNVs from a single BED for a single region, and split by case/control
#'
#' @param bedpaths paths to one or more .bed files with CNVs
#' @param region coordinates of region to be extracted
#' @param cnv filter to this CNV type \[default: do not filter by CNV type\]
#' @param case.hpos vector of case HPOs to evaluate \[default: c("HP:0000118")\]
#' @param ctrl.hpo control HPO \[default: "HEALTHY_CONTROL"\]
#' @param rstudio.local boolean to indicate local Rstudio environment \[default: FALSE\]
#'
#' @details `region` coordinates must be a `tabix`-compatbile string. If multiple
#' .bed files are provided as a vector to `bedpaths`, the contents of these files
#' will be concatenated after loading.
#'
#' @return list with the following two elements:
#' 1. `$case` data.frame of case CNVs
#' 2. `$ctrl` data.frame of control CNVs
#'
#' @export
load.cnvs.from.region <- function(bedpaths, region, cnv=NULL,
                                  case.hpos=c("HP:0000118"),
                                  ctrl.hpo="HEALTHY_CONTROL",
                                  rstudio.local=FALSE){
  # Required for bedr in local Rstudio only:
  if(rstudio.local){
    Sys.setenv(PATH = paste(Sys.getenv("PATH"), "~/anaconda3/envs/py3/bin", sep = ":"))
  }
  # Tabix the region of interest from each BED and concatenate all hits
  require(bedr, quietly=T)
  cnvs <- as.data.frame(do.call("rbind", lapply(bedpaths, function(bedpath){
    if(!file.exists(paste(bedpath, "tbi", sep="."))){
      stop(paste("tabix index not found for input file", bedpath))
    }
    bedr::tabix(region, bedpath, check.chr=FALSE, verbose=FALSE)
  })))
  # Sort & filter CNVs; empty.df is the zero-row, correctly-typed fallback
  # returned when nothing survives the region query / CNV-type filter
  empty.df <- data.frame("chr"=character(), "start"=numeric(),
                         "end"=numeric(), "cnv_id"=character(),
                         "cnv"=character(), "pheno"=character())
  if(nrow(cnvs) > 0){
    # Ensure consistent column names
    colnames(cnvs) <- c("chr", "start", "end", "cnv_id", "cnv", "pheno")
    cnvs <- cnvs[with(cnvs, order(start, end)), ]
    # Optionally restrict to a single CNV type (e.g. DEL or DUP)
    if(!is.null(cnv)){
      cnvs <- cnvs[which(cnvs$cnv==cnv), ]
    }
    if(nrow(cnvs) > 0){
      # Case CNVs: any HPO in the ";"-delimited pheno string matches case.hpos
      case.cnv.idxs <- which(sapply(cnvs$pheno, function(pstr){
        any(case.hpos %in% unlist(strsplit(pstr, split=";", fixed=T)))
      }))
      case.cnvs <- cnvs[case.cnv.idxs, ]
      # Control CNVs: pheno string contains the control HPO (fixed match);
      # a CNV may appear in both lists if its carrier has both labels
      ctrl.cnvs <- cnvs[grep(ctrl.hpo, cnvs$pheno, fixed=T), ]
    }else{
      case.cnvs <- empty.df
      ctrl.cnvs <- empty.df
    }
  }else{
    case.cnvs <- empty.df
    ctrl.cnvs <- empty.df
  }
  return(list("case"=case.cnvs, "ctrl"=ctrl.cnvs))
}
#' Load multiple CNV BEDs
#'
#' Load all CNVs for a specific region from a list of BEDs
#'
#' @param cnvlist .tsv of CNV .bed files to load
#' @param region coordinates of region to be extracted
#' @param cnv filter to this CNV type \[default: do not filter by CNV type\]
#' @param case.hpos vector of case HPOs to evaluate \[default: c("HP:0000118")\]
#' @param ctrl.hpo control HPO \[default: "HEALTHY_CONTROL"\]
#' @param rstudio.local boolean to indicate local Rstudio environment \[default: FALSE\]
#'
#' @details `region` coordinates must be a `tabix`-compatbile string
#'
#' @return list of outputs from [load.cnvs.from.region()] for each row in `cnvlist`
#'
#' @seealso [load.cnvs.from.region]
#'
#' @export
load.cnvs.from.region.multi <- function(cnvlist, region, cnv=NULL,
                                        case.hpo="HP:0000118",
                                        ctrl.hpo="HEALTHY_CONTROL",
                                        rstudio.local=FALSE){
  # cnvlist should be a tsv of (cohort, path) pairs
  cohort.table <- read.table(cnvlist, header=F, sep="\t")
  # Query each cohort's BED for the same region
  per.cohort <- lapply(seq_len(nrow(cohort.table)), function(row.i){
    load.cnvs.from.region(cohort.table[row.i, 2], region, cnv, case.hpo,
                          ctrl.hpo, rstudio.local)
  })
  names(per.cohort) <- cohort.table[, 1]
  return(per.cohort)
}
#' Format CNVs for locus highlight
#'
#' Transform CNV coordinates to plotting values (with colors) for locus highlights
#'
#' @param cnvs data.frame of CNVs to be plotted
#' @param start left-most plotting coordinate
#' @param end right-most plotting coordinate
#' @param dx plotting resolution along X axis, specified as total number of steps \[default: 100\]
#' @param cnv.height relative height for each CNV \[default: 1\]
#' @param cnv.buffer relative buffer between adjacent CNVs \[default: 0\]
#' @param bevel.switch.pct fraction of start/end of each CNV to be beveled \[default: 0.025\]
#' @param col CNV color \[default: blueblack\]
#' @param highlight.hpo highlight CNVs from this HPO in a different color
#' \[default: plot all CNVs in the same color\]
#' @param highlight.color color to be used for `highlight.hpo`
#' \[default: plot all CNVs in the same color\]
#'
#' @return list ofwith the following two elements:
#' `$cnvs`: plotting values for each individual CNV
#' `$counts`: total number of CNVs overlapping each bin on the X axis
#'
#' @seealso [load.cnvs.from.region]
#'
#' @export
pileup.cnvs.for.highlight <- function(cnvs, start=NULL, end=NULL, dx=100,
                                      cnv.height=1, cnv.buffer=0,
                                      bevel.switch.pct=0.025, col=blueblack,
                                      highlight.hpo=NA, highlight.col=NULL){
  # NOTE: the default col=blueblack is evaluated lazily; `blueblack` is a
  # color constant expected to be defined elsewhere in the package
  # Set range of values to evaluate (defaults to the CNV extremes)
  if(is.null(start)){
    start <- min(cnvs$start)
  }
  if(is.null(end)){
    end <- max(cnvs$end)
  }
  # Evaluate the pileup on a grid of dx+1 evenly spaced x positions
  x <- seq(start, end, length.out=dx+1)
  # Set other scaling parameters
  cnv.y.buf <- cnv.height * cnv.buffer
  # Build empty dataframe of CNV pileup
  counts <- data.frame("pos"=x, "count"=0, "idx"=1:length(x))
  # Create plotting values for each CNV
  # Note: must use for loop to increment counts after each CNV is added
  cnv.plot.values <- list()
  if(nrow(cnvs) > 0){
    for(i in 1:nrow(cnvs)){
      # Get CNV info and increment counts over the bins the CNV overlaps
      cnv.id <- cnvs$cnv_id[i]
      cnv.start <- cnvs$start[i]
      cnv.end <- cnvs$end[i]
      cnv.x.idxs <- which(x >= cnv.start & x <= cnv.end)
      counts$count[cnv.x.idxs] <- counts$count[cnv.x.idxs] + cnv.height
      cnv.hpos <- unlist(strsplit(cnvs$pheno[i], split=";", fixed=T))
      # Create closed-polygon plotting vectors (top edge then reversed bottom)
      cnv.x <- c(x[cnv.x.idxs], rev(x[cnv.x.idxs]))
      cnv.y <- c(counts$count[cnv.x.idxs] - cnv.y.buf,
                 rev(counts$count[cnv.x.idxs] - cnv.height + cnv.y.buf))
      # Bevel edges by single dx
      # Bevel left edge according to the CNV that came before
      max.change.dist <- ceiling(bevel.switch.pct * dx)
      if(length(cnv.x.idxs) > 0){
        # prev.counts is only defined after the first iteration; the i == 1
        # guard below ensures it is never read before assignment
        if(i == 1 | cnv.x.idxs[1] == 1){
          dist.to.nearest.change <- 10e10
        }else{
          same.height.idxs <- which(prev.counts$count == max(counts$count[cnv.x.idxs]))
          if(length(same.height.idxs) > 0){
            dist.to.nearest.change <- min(cnv.x.idxs) - max(same.height.idxs)
          }else{
            dist.to.nearest.change <- 10e10
          }
        }
        # Slope the left edge outward unless a same-height neighbor is close
        if(dist.to.nearest.change > max.change.dist){
          cnv.y[1] <- counts$count[cnv.x.idxs[1]] - cnv.height + cnv.y.buf
        }else{
          cnv.y[length(cnv.y)] <- counts$count[cnv.x.idxs[1]] - cnv.y.buf
        }
      }
      # Always bevel right edge sloping outward
      cnv.y[length(cnv.x.idxs)] <- counts$count[cnv.x.idxs[length(cnv.x.idxs)]] - cnv.height + cnv.y.buf
      # Assign color: highlighted HPO carriers get highlight.col
      if(!is.na(highlight.hpo)){
        if(highlight.hpo %in% cnv.hpos){
          cnv.color <- highlight.col
        }else{
          cnv.color <- col
        }
      }else{
        cnv.color <- col
      }
      # Add CNV plotting values to output list, keyed by CNV ID
      cnv.plot.values[[cnv.id]] <- list("x"=cnv.x, "y"=cnv.y, "color"=cnv.color)
      # Save previous CNV's x indexes and counts for comparisons
      prev.x.idxs <- cnv.x.idxs
      prev.counts <- counts
    }
  }
  # Increment final counts by cnv.y.buf (for plotting)
  # NOTE(review): this writes a NEW column "counts" rather than updating the
  # existing "count" column -- confirm whether downstream plotting reads
  # `counts$counts` or whether this was meant to be `counts$count`
  counts$counts <- counts$count + cnv.y.buf
  return(list("cnvs"=cnv.plot.values, "counts"=counts))
}
|
Rsquare2F <-
function(R2=NULL, df.1=NULL, df.2=NULL, p=NULL, N=NULL)
{
# Convert a squared multiple correlation (R2) into the corresponding F
# statistic. Degrees of freedom may be supplied directly (df.1, df.2) or
# derived from the number of predictors (p) and the sample size (N).
if(is.null(df.1) && is.null(df.2) && !is.null(N) && !is.null(p))
{
df.1 <- p
df.2 <- N - p - 1
}
if(is.null(df.1) || is.null(df.2)) stop("You have not specified \'df.1\', \'df.2\', \'N\', and/or \'p\' correctly.")
(R2/df.1)/((1 - R2)/df.2)
}
| /R/Rsquare2F.R | no_license | cran/MBESS | R | false | false | 324 | r | "Rsquare2F" <-
function(R2=NULL, df.1=NULL, df.2=NULL, p=NULL, N=NULL)
{
if(is.null(df.1) & is.null(df.2) & !is.null(N) & !is.null(p))
{
df.1 <- p
df.2 <- N-p-1
}
if(is.null(df.1) | is.null(df.2)) stop("You have not specified \'df.1\', \'df.2\', \'N\', and/or \'p\' correctly.")
return((R2/df.1)/((1-R2)/df.2))
}
|
library(data.table)
.debug <- "~/Dropbox/SA2UK"
.args <- if (interactive()) sprintf(c(
"%s/inputs/epi_data.rds"
), .debug) else commandArgs(trailingOnly = TRUE)
target <- tail(.args, 1)
jhurl <- "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/"
casesurl <- sprintf("%s/time_series_covid19_confirmed_global.csv", jhurl)
deathsurl <- sprintf("%s/time_series_covid19_deaths_global.csv", jhurl)
fetch <- function(url, vn) melt(fread(url)[
#`Country/Region` %in% c("South Africa", "United Kingdom") &
`Province/State` == ""
][, -c(1,3,4) ], id.vars = "Country/Region", variable.name = "date", value.name = vn)
# fetch ECDC data; requires network connection
cases.dt <- fetch(casesurl, "cases")
deaths.dt <- fetch(deathsurl, "deaths")
| /hpc_scripts/get_epi_data.r | no_license | thimotei/CFR_calculation | R | false | false | 804 | r | library(data.table)
.debug <- "~/Dropbox/SA2UK"
.args <- if (interactive()) sprintf(c(
"%s/inputs/epi_data.rds"
), .debug) else commandArgs(trailingOnly = TRUE)
target <- tail(.args, 1)
jhurl <- "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/"
casesurl <- sprintf("%s/time_series_covid19_confirmed_global.csv", jhurl)
deathsurl <- sprintf("%s/time_series_covid19_deaths_global.csv", jhurl)
fetch <- function(url, vn) melt(fread(url)[
#`Country/Region` %in% c("South Africa", "United Kingdom") &
`Province/State` == ""
][, -c(1,3,4) ], id.vars = "Country/Region", variable.name = "date", value.name = vn)
# fetch ECDC data; requires network connection
cases.dt <- fetch(casesurl, "cases")
deaths.dt <- fetch(deathsurl, "deaths")
|
# Exploratory plots of the house-price dataset.
library(tidyverse)
library(ggplot2)
library(scales)
# Bugfix: install.packages(scale) passed the base `scale` FUNCTION instead of
# a quoted package name and errored at runtime; package installation also does
# not belong in an analysis script, so it is left only as a reminder:
# install.packages("scales")
url <- '/Users/dennie/Desktop/NEU/ALY 6070/Module 5/House_Price_data.csv'
data <- read.csv(url)
glimpse(data)
summary(data)
# Normalize column names to lower case
names(data) <- tolower(names(data))
names(data)
head(data[1:5])
# Distribution of lot area (base graphics and ggplot versions)
hist(data[, 'lotarea'])
ggplot(data, aes(x = lotarea)) +
  geom_histogram()
# Lot area vs year built for two neighborhoods (base graphics)
plot(lotarea ~ yearbuilt,
     col = factor(neighborhood),
     data = data[data[, 'neighborhood'] %in% c('OldTown', 'Sawyer'), ])
legend('topleft',
       legend = c('OldTown', 'Sawyer'),
       col = c('black', 'red'),
       pch = 1)
# Same comparison as a ggplot scatter plot
ggplot(data = data[data[, 'neighborhood'] %in% c('OldTown', 'Sawyer'), ],
       aes(x = yearbuilt,
           y = lotarea,
           color = neighborhood)) +
  geom_point()
# Houses built after 1950
new_house <- filter(data, yearbuilt > 1950)
ggplot(new_house, aes(x = yearbuilt)) +
  geom_histogram()
ggplot(data = data,
       aes(x = lotfrontage, y = lotarea)) +
  geom_point()
# Remove outliers on lotfrontage. Bugfix: the removal is guarded, because
# data[-integer(0), ] returns ZERO rows and would silently wipe the whole
# data frame whenever no outliers match.
ggplot(data, aes(x = lotfrontage)) +
  geom_boxplot()
subset(data, lotfrontage > 150)
remove <- which(data$lotfrontage > 150)
if (length(remove) > 0) {
  data <- data[-remove, ]
}
# Remove outliers on lotarea (same guard)
ggplot(data, aes(x = lotarea)) +
  geom_boxplot()
subset(data, lotarea > 60000)
remove <- which(data$lotarea > 60000)
if (length(remove) > 0) {
  data <- data[-remove, ]
}
# Sale price vs year built / lot area
ggplot(data = data,
       aes(x = yearbuilt, y = saleprice)) +
  geom_point()
ggplot(data = data[data[, 'neighborhood'] %in% c('OldTown', 'Sawyer'), ],
       aes(x = lotarea,
           y = saleprice,
           color = neighborhood)) +
  geom_point()
# Base plot object: it has no geom layer yet, so printing p3 shows an empty
# panel until p4 adds the jittered points below
p3 <- ggplot(data,
             aes(x = neighborhood,
                 y = saleprice)) +
  theme(legend.position = 'top',
        axis.text = element_text(size = 6))
p3
(p4 <- p3 + geom_point(aes(color = yearbuilt),
                       alpha = 0.5,
                       size = 1.5,
                       position = position_jitter(width = 0.25, height = 0)))
# Modify the breaks for the x-axis and the year-built color scale
p4 + scale_x_discrete(name = 'neighborhood') +
  scale_color_continuous(name = '',
                         breaks = c(1880, 1920, 1960, 2000),
                         labels = c('80', '20', '60', '00'))
# Change the colors of low and high values
p4 + scale_x_discrete(name = 'neighborhood') +
  scale_color_continuous(name = '',
                         breaks = c(1880, 1920, 1960, 2000),
                         labels = c('80', '20', '60', '00'),
                         low = 'red', high = 'blue')
# Sale price over time, faceted by neighborhood
p5 <- ggplot(data, aes(x = yearbuilt, y = saleprice))
p5 + geom_line(aes(color = neighborhood))
(p5 <- p5 + geom_line() +
    facet_wrap(~neighborhood, ncol = 10, scales = 'free_y',
               as.table = TRUE))
| /Individual Rshiny Visualization.R | no_license | Dennieeeee/Data-Wrangling-in-R | R | false | false | 2,681 | r | library(tidyverse)
library(ggplot2)
library(scales)
install.packages(scale)
url='/Users/dennie/Desktop/NEU/ALY 6070/Module 5/House_Price_data.csv'
data <- read.csv(url)
glimpse(data)
summary(data)
names(data) <- tolower(names(data))
names(data)
head(data[1:5])
hist(data[,'lotarea'])
ggplot(data, aes(x = lotarea)) +
geom_histogram()
plot(lotarea ~ yearbuilt,
col = factor(neighborhood),
data = data[data[,'neighborhood'] %in% c('OldTown', 'Sawyer'),])
legend('topleft',
legend = c('OldTown', 'Sawyer'),
col = c('black','red'),
pch = 1)
#scatter plot
ggplot(data=data[data[,'neighborhood'] %in% c('OldTown', 'Sawyer'),],
aes(x=yearbuilt,
y=lotarea,
color = neighborhood))+
geom_point()
#house built after 1950
new_house <- filter(data, yearbuilt > 1950)
ggplot(new_house, aes(x = yearbuilt)) +
geom_histogram()
ggplot(data = data,
aes(x=lotfrontage, y=lotarea))+
geom_point()
#remove outlier on lotfrontage
ggplot(data, aes(x = lotfrontage)) +
geom_boxplot()
subset(data, lotfrontage > 150)
remove <- which(data$lotfrontage>150)
data<-data[-remove,]
#remove outlier on lotarea
ggplot(data, aes(x = lotarea)) +
geom_boxplot()
subset(data, lotarea >60000)
remove <- which(data$lotarea>60000)
data<-data[-remove,]
#
ggplot(data=data,
aes(x=yearbuilt, y=saleprice)) +
geom_point()
ggplot(data=data[data[,'neighborhood'] %in% c('OldTown', 'Sawyer'),],
aes(x=lotarea,
y=saleprice,
color = neighborhood))+
geom_point()
p3 <- ggplot(data,
aes(x=neighborhood,
y=saleprice)) +
theme(legend.position = 'top',
axis.text = element_text(size=6)) #output show nothing; doesn't generate a graph
p3
(p4 <- p3 + geom_point(aes(color = yearbuilt),
alpha = 0.5,
size = 1.5,
position = position_jitter(width = 0.25, height = 0)))
#modify the breaks for x-axis and color sales
p4 + scale_x_discrete(name='neighborhood')+
scale_color_continuous(name='',
breaks=c(1880,1920,1960,2000),
labels=c('80','20','60','00'))
#change the colors of low and high values
p4 + scale_x_discrete(name='neighborhood')+
scale_color_continuous(name='',
breaks=c(1880,1920,1960,2000),
labels=c('80','20','60','00'),
low='red',high='blue')
p5 <- ggplot(data, aes(x=yearbuilt, y=saleprice))
p5 + geom_line(aes(color = neighborhood))
(p5 <- p5+geom_line()+
facet_wrap(~neighborhood, ncol=10,scales='free_y',
as.table = TRUE))
|
library(dplyr)
library(tidyr)
library(readr)
library(ggplot2)
file_list <- read.csv("nhs_data/file_names_code.csv", stringsAsFactors = FALSE)
file_names <- file_list$all_ages
names <- file_list$disease
LoadData <- function(file_names, names){
mortality <- read.csv(paste0("nhs_data/", file_names), stringsAsFactors = FALSE) %>%
filter(ORG_TYPE_DESCRIPTION %in% c("LOCAL AUTHORITIES (boundaries as of April 2009)") |
ORG_TITLE == "England") %>%
mutate(year = YEAR,
local_area = ifelse(ORG_TITLE == "England", "England", NEW_CODE),
rate_all = DSR,
gender = SEX_CODE) %>%
select(year, local_area, rate_all, gender) %>%
mutate(cohort = names) %>%
distinct()
return(mortality)
}
all_data <- map2(file_names, names, LoadData) %>%
do.call(rbind, .) %>%
mutate(gender = case_when(cohort %in% c("breast_cancer", "cervical_cancer")~"F",
TRUE~gender))
# top and bottom 10%
high_low_estimated <- all_data %>%
group_by(cohort, gender) %>%
mutate(national = mean(subset(rate_all, local_area == "England"), na.rm = T),
rate_all = ifelse(is.na(rate_all), national, rate_all),
rank = rank(rate_all),
rank_group = cut(rank, breaks = 10)) %>%
mutate(low_rate = mean(subset(rate_all, rank < 32), na.rm = T),
high_rate = mean(subset(rate_all, rank > 284), na.rm = T)) %>%
ungroup() %>%
distinct(high_rate, low_rate, cohort, gender, national) %>%
mutate(abs_diff = high_rate-low_rate,
rel_diff = high_rate/low_rate) %>%
gather("estimate", "rate", 3:5) %>%
arrange(gender, desc(estimate), rate) %>%
mutate(cohort2 = factor(cohort, levels = .$cohort))
#scrath plot
ggplot(high_low_estimated %>%
filter(gender == "F"), aes(rate, cohort2)) +
geom_line(aes(group = cohort)) +
geom_point(aes(color = estimate)) +
scale_x_continuous(trans='log2')
| /mortality_rates_dot_plot.R | permissive | carriebennette/nhs-inequality-viz | R | false | false | 1,930 | r |
library(dplyr)
library(tidyr)
library(readr)
library(ggplot2)
file_list <- read.csv("nhs_data/file_names_code.csv", stringsAsFactors = FALSE)
file_names <- file_list$all_ages
names <- file_list$disease
LoadData <- function(file_names, names){
mortality <- read.csv(paste0("nhs_data/", file_names), stringsAsFactors = FALSE) %>%
filter(ORG_TYPE_DESCRIPTION %in% c("LOCAL AUTHORITIES (boundaries as of April 2009)") |
ORG_TITLE == "England") %>%
mutate(year = YEAR,
local_area = ifelse(ORG_TITLE == "England", "England", NEW_CODE),
rate_all = DSR,
gender = SEX_CODE) %>%
select(year, local_area, rate_all, gender) %>%
mutate(cohort = names) %>%
distinct()
return(mortality)
}
all_data <- map2(file_names, names, LoadData) %>%
do.call(rbind, .) %>%
mutate(gender = case_when(cohort %in% c("breast_cancer", "cervical_cancer")~"F",
TRUE~gender))
# top and bottom 10%
high_low_estimated <- all_data %>%
group_by(cohort, gender) %>%
mutate(national = mean(subset(rate_all, local_area == "England"), na.rm = T),
rate_all = ifelse(is.na(rate_all), national, rate_all),
rank = rank(rate_all),
rank_group = cut(rank, breaks = 10)) %>%
mutate(low_rate = mean(subset(rate_all, rank < 32), na.rm = T),
high_rate = mean(subset(rate_all, rank > 284), na.rm = T)) %>%
ungroup() %>%
distinct(high_rate, low_rate, cohort, gender, national) %>%
mutate(abs_diff = high_rate-low_rate,
rel_diff = high_rate/low_rate) %>%
gather("estimate", "rate", 3:5) %>%
arrange(gender, desc(estimate), rate) %>%
mutate(cohort2 = factor(cohort, levels = .$cohort))
#scrath plot
ggplot(high_low_estimated %>%
filter(gender == "F"), aes(rate, cohort2)) +
geom_line(aes(group = cohort)) +
geom_point(aes(color = estimate)) +
scale_x_continuous(trans='log2')
|
`SynAnticline` <-
function(x,y, syn=TRUE, spacing=NULL, N=1, r1= 1, r2= 1.2, h1= 0, h2= 0,endtol=.1,
         REV=FALSE, col='black', ...)
{
    ## Draw a syncline (syn=TRUE) or anticline (syn=FALSE) fold-axis symbol
    ## along the polyline (x, y): the line itself, an arrowhead at the far
    ## end, and N horseshoe tick marks oriented by the local line direction.
    ## NOTE(review): when h1/h2 are not supplied, missing() is TRUE and they
    ## are reset to 0.5 here, overriding the signature defaults of 0 --
    ## confirm which default is intended.
    if(missing(spacing)) spacing=NULL
    if(missing(REV)) REV=FALSE
    if(missing(r1)) { r1 = 1 }
    if(missing(r2)) { r2 = 1.2 }
    if(missing(h1)) { h1 = .5 }
    if(missing(h2)) { h2 = .5 }
    if(missing(col)) { col='black' }
    if(missing(N)) { N = 1 }
    if(missing(syn)) { syn=TRUE }
    ## Reverse the polyline so the arrowhead points the other way
    if(REV){ x= rev(x); y = rev(y) }
    if(missing(endtol)) { endtol=.1 }
    n = length(x)
    ## Sample N points (with rotation info) along the line for the tick marks
    g = PointsAlong(x, y, N=N, endtol=endtol)
    lines(x,y, col=col, ...)
    ## Arrowhead drawn from the second-to-last to the last vertex
    arrows(x[n-1], y[n-1], x[n], y[n], col=col, length = 0.1 )
    ## g$rot$sn = -g$rot$sn
    ## Flip the rotation components so the horseshoes open toward
    ## (syncline) or away from (anticline) the axis
    if(syn)
      {
        cs = g$rot$cs
        sn = -g$rot$sn
      }
    else
      {
        cs = -g$rot$cs
        sn = g$rot$sn
      }
    ## Swap sin/cos terms before handing the rotation to horseshoe()
    g$rot$cs = sn
    g$rot$sn = cs
    horseshoe(g$x , g$y , r1=r1, r2=r2, h2=h2, h1=h1, rot=g$rot, col=col)
}
| /R/SynAnticline.R | no_license | cran/GEOmap | R | false | false | 957 | r | `SynAnticline` <-
function(x,y, syn=TRUE, spacing=NULL, N=1, r1= 1, r2= 1.2, h1= 0, h2= 0,endtol=.1,
REV=FALSE, col='black', ...)
{
if(missing(spacing)) spacing=NULL
if(missing(REV)) REV=FALSE
if(missing(r1)) { r1 = 1 }
if(missing(r2)) { r2 = 1.2 }
if(missing(h1)) { h1 = .5 }
if(missing(h2)) { h2 = .5 }
if(missing(col)) { col='black' }
if(missing(N)) { N = 1 }
if(missing(syn)) { syn=TRUE }
if(REV){ x= rev(x); y = rev(y) }
if(missing(endtol)) { endtol=.1 }
n = length(x)
g = PointsAlong(x, y, N=N, endtol=endtol)
lines(x,y, col=col, ...)
arrows(x[n-1], y[n-1], x[n], y[n], col=col, length = 0.1 )
## g$rot$sn = -g$rot$sn
if(syn)
{
cs = g$rot$cs
sn = -g$rot$sn
}
else
{
cs = -g$rot$cs
sn = g$rot$sn
}
g$rot$cs = sn
g$rot$sn = cs
horseshoe(g$x , g$y , r1=r1, r2=r2, h2=h2, h1=h1, rot=g$rot, col=col)
}
|
## Matrix-inversion caching: makeCacheMatrix builds a special "matrix" object
## that can cache its inverse, and cacheSolve computes/retrieves that inverse.

## makeCacheMatrix returns a list of accessor functions that close over the
## matrix `x` and its cached inverse `m`:
##   set/get        -- replace/read the stored matrix (set invalidates cache)
##   setinverse/getinverse -- store/read the cached inverse
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  set <- function(y) {
    x <<- y
    m <<- NULL  # invalidate any cached inverse when the matrix changes
  }
  get <- function() x
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Bugfix: the closing brace above was missing, which nested cacheSolve
## inside makeCacheMatrix and made the file fail to parse.

## cacheSolve computes the inverse of the special "matrix" returned by
## makeCacheMatrix. If the inverse has already been calculated (and the
## matrix has not changed), it retrieves the inverse from the cache.
cacheSolve <- function(x, ...) {
  m <- x$getinverse()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
| /cachematrix.R | no_license | canelbiryol/ProgrammingAssignment2 | R | false | false | 1,249 | r | ## Put comments here that give an overall description of what your
## functions do
##This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
## Write a short comment describing this function
##This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
##If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
theme_dsb_base <- function(default_color = "#1b1b1b", default_text_color = "#fcfcfc", grid_color = "#595959",
base_size = 10, margin = 24) {
axis_text_margin <- margin / 2
dsb_text_default <- theme(
text = element_text(
family = "sans", face = "plain", size = base_size, color = default_text_color,
# linespacing=1.1, , ha='center', va='baseline',
angle = 0,
margin = margin(t = margin, b = margin, l = margin, r = margin)
),
plot.background = element_rect(fill = default_color, color = NA),
panel.border = element_blank(),
panel.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.title = element_text(size = 12, face = "plain"),
title = element_text(size = 12, face = "plain"),
axis.text.x = element_text(angle = 45, hjust = 1, size = 10),
axis.ticks = element_line(size = 0.1),
axis.text.y = element_text(size = 10),
axis.title.x = element_text(size = 12),
axis.title.y = element_text(size = 12),
panel.spacing = unit(.45, "points"),
legend.title = element_text(size = 10),
legend.text = element_text(size = 9),
legend.position = "top",
legend.spacing = unit(0, "points"),
axis.line = element_line(color = "#212121", size = 0.4)
)
return(theme_minimal() + dsb_text_default)
}
theme_dsb_dark <- function() {
dsb_dark <- theme_dsb_base(default_color = "#212121", default_text_color = "#fcfcfc", grid_color = "#595959")
return(dsb_dark)
}
theme_dsb_light <- function() {
dsb_dark <- theme_dsb_base(default_color = "#ffffff", default_text_color = "#212121", grid_color = "#bbbbbb")
return(dsb_dark)
}
| /epiCata/R/dsb_ggplot_theme.R | permissive | Data-Science-Brigade/modelo-epidemiologico-sc | R | false | false | 1,711 | r |
theme_dsb_base <- function(default_color = "#1b1b1b", default_text_color = "#fcfcfc", grid_color = "#595959",
base_size = 10, margin = 24) {
axis_text_margin <- margin / 2
dsb_text_default <- theme(
text = element_text(
family = "sans", face = "plain", size = base_size, color = default_text_color,
# linespacing=1.1, , ha='center', va='baseline',
angle = 0,
margin = margin(t = margin, b = margin, l = margin, r = margin)
),
plot.background = element_rect(fill = default_color, color = NA),
panel.border = element_blank(),
panel.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.title = element_text(size = 12, face = "plain"),
title = element_text(size = 12, face = "plain"),
axis.text.x = element_text(angle = 45, hjust = 1, size = 10),
axis.ticks = element_line(size = 0.1),
axis.text.y = element_text(size = 10),
axis.title.x = element_text(size = 12),
axis.title.y = element_text(size = 12),
panel.spacing = unit(.45, "points"),
legend.title = element_text(size = 10),
legend.text = element_text(size = 9),
legend.position = "top",
legend.spacing = unit(0, "points"),
axis.line = element_line(color = "#212121", size = 0.4)
)
return(theme_minimal() + dsb_text_default)
}
theme_dsb_dark <- function() {
dsb_dark <- theme_dsb_base(default_color = "#212121", default_text_color = "#fcfcfc", grid_color = "#595959")
return(dsb_dark)
}
theme_dsb_light <- function() {
dsb_dark <- theme_dsb_base(default_color = "#ffffff", default_text_color = "#212121", grid_color = "#bbbbbb")
return(dsb_dark)
}
|
# Tests for the `otherHalting` early-stopping options of bayesOpt().
# NOTE(review): context() is deprecated under testthat 3e -- confirm edition.
context('otherHalting')
# Fixed seed so the Gaussian-process initialization is reproducible.
set.seed(1991)
testthat::test_that(
  "timeLimit"
  , {
    skip_on_cran()
    # Concave objective with a single maximum at (5, -10).
    sf <- function(x,y) 1000 - (x-5)^2 - (y + 10)^2
    # bayesOpt() scoring wrapper: must return a list with a Score element.
    FUN <- function(x,y) {
      return(list(Score = sf(x,y)))
    }
    bounds = list(
        x = c(0,15)
      , y = c(-20,100)
    )
    # iters.n is large on purpose: the 5-second timeLimit should fire first.
    optObj <- bayesOpt(
        FUN
      , bounds
      , initPoints = 3
      , iters.n = 25
      , otherHalting = list(timeLimit = 5)
      , verbose = 0
    )
    # The run must stop with the time-limit message, not exhaust iterations.
    expect_equal(
        optObj$stopStatus
      , ParBayesianOptimization:::makeStopEarlyMessage("Time Limit - 5 seconds.")
    )
  }
)
testthat::test_that(
  "minUtility"
  , {
    skip_on_cran()
    # Same concave objective as the timeLimit test; maximum at (5, -10).
    sf <- function(x,y) 1000 - (x-5)^2 - (y + 10)^2
    FUN <- function(x,y) {
      return(list(Score = sf(x,y)))
    }
    bounds = list(
        x = c(0,15)
      , y = c(-20,100)
    )
    # Stop as soon as the acquisition utility drops below 0.1.
    optObj <- bayesOpt(
        FUN
      , bounds
      , initPoints = 3
      , iters.n = 25
      , otherHalting = list(minUtility = 0.1)
      , verbose = 0
    )
    # The run must report the minimum-utility stop message.
    expect_equal(
        optObj$stopStatus
      , ParBayesianOptimization:::makeStopEarlyMessage("Returning Results. Could not meet minimum required (0.1) utility.")
    )
  }
)
| /tests/testthat/test-otherHalting.R | no_license | AnotherSamWilson/ParBayesianOptimization | R | false | false | 1,202 | r | context('otherHalting')
set.seed(1991)
testthat::test_that(
"timeLimit"
, {
skip_on_cran()
sf <- function(x,y) 1000 - (x-5)^2 - (y + 10)^2
FUN <- function(x,y) {
return(list(Score = sf(x,y)))
}
bounds = list(
x = c(0,15)
, y = c(-20,100)
)
optObj <- bayesOpt(
FUN
, bounds
, initPoints = 3
, iters.n = 25
, otherHalting = list(timeLimit = 5)
, verbose = 0
)
expect_equal(
optObj$stopStatus
, ParBayesianOptimization:::makeStopEarlyMessage("Time Limit - 5 seconds.")
)
}
)
testthat::test_that(
"minUtility"
, {
skip_on_cran()
sf <- function(x,y) 1000 - (x-5)^2 - (y + 10)^2
FUN <- function(x,y) {
return(list(Score = sf(x,y)))
}
bounds = list(
x = c(0,15)
, y = c(-20,100)
)
optObj <- bayesOpt(
FUN
, bounds
, initPoints = 3
, iters.n = 25
, otherHalting = list(minUtility = 0.1)
, verbose = 0
)
expect_equal(
optObj$stopStatus
, ParBayesianOptimization:::makeStopEarlyMessage("Returning Results. Could not meet minimum required (0.1) utility.")
)
}
)
|
#' Print method for `ClassificationPlot` objects.
#'
#' Prints a banner, the AUC with its 95% CI, and the summary table for
#' model 1, and for model 2 when present.
#'
#' @param x A `ClassificationPlot` object: a list with element `Model1`
#'   (and optionally `Model2`), each containing `AUC` (point estimate at
#'   position 1, CI bounds at positions 3 and 4) and `Summary`.
#' @param ... Ignored; present for S3 `print` compatibility.
#' @return `x`, invisibly (standard print-method convention).
print.ClassificationPlot <- function(x, ...){
  # Helper: print the banner, AUC (95% CI) and summary table for one model.
  print_model <- function(model, number) {
    cat("\n#~~~~~~~~~~~~~~~~~#\n")
    cat(paste0("# Results model ", number, " #\n"))
    cat("#~~~~~~~~~~~~~~~~~#\n\n")
    AUC <- round(model$AUC, 3)
    AUC <- paste(AUC[1], " (", AUC[3], "-", AUC[4], ")", sep = "")
    cat(paste(" - AUC (95% CI)=", AUC, "\n\n"))
    print(model$Summary)
  }
  print_model(x$Model1, 1)
  if ("Model2" %in% names(x)) {
    print_model(x$Model2, 2)
  }
  invisible(x)
}
| /R/print.functions.R | no_license | mafshar99/ClassificationPlot | R | false | false | 609 | r | print.ClassificationPlot <- function(x, ...){
cat("\n#~~~~~~~~~~~~~~~~~#\n")
cat("# Results model 1 #\n")
cat("#~~~~~~~~~~~~~~~~~#\n\n")
AUC = round(x$Model1$AUC, 3)
AUC = paste(AUC[1], " (",AUC[3],"-",AUC[4],")",sep="")
cat(paste(" - AUC (95% CI)=", AUC,"\n\n"))
print(x$Model1$Summary)
if(any(names(x)=="Model2")){
cat("\n#~~~~~~~~~~~~~~~~~#\n")
cat("# Results model 2 #\n")
cat("#~~~~~~~~~~~~~~~~~#\n\n")
AUC = round(x$Model2$AUC, 3)
AUC = paste(AUC[1], " (",AUC[3],"-",AUC[4],")",sep="")
cat(paste(" - AUC (95% CI)=", AUC,"\n\n"))
print(x$Model2$Summary)
}
}
|
rm(list=ls())
#setwd("")
load("KrukUWr2020.RData")
library(data.table)
set.seed(1)
summary(cases)
summary(events)
Cases <- data.table(cases)
Events <- data.table(events)
# Próbka
# Cases <- Cases[sample((1:dim(Cases)[1]),0.5*dim(Cases)[1])]
# Decoding variables
Cases[,CreditCard := ifelse(Product=="Credit card",1,0)]
Cases[,Female := ifelse(Gender=="FEMALE",1,0)]
# Handling missing data
Variables = c( "LoanAmount",
"TOA",
"Principal",
"Interest",
"Other",
"D_ContractDateToImportDate",
"DPD",
"PopulationInCity",
"Age",
"LastPaymentAmount",
"M_LastPaymentToImportDate",
"GDPPerCapita",
"MeanSalary",
"CreditCard",
"Female",
"Bailiff",
"ClosedExecution"
)
nullCounts <- lapply(Cases[,.SD,.SDcols=Variables], function(x) sum(is.na(x)))
# Imputation with avg
variables <- c( "LoanAmount",
"TOA",
"Principal",
"Interest",
"Other",
"D_ContractDateToImportDate",
"DPD",
"PopulationInCity",
"Age",
"LastPaymentAmount",
"M_LastPaymentToImportDate",
"GDPPerCapita",
"MeanSalary"
)
# Mean-impute each numeric variable that still has missing values.
# Replaces the original eval(parse(text = paste(...))) string-built code,
# which is fragile and unreadable, with direct list/data.table indexing:
# behavior is identical (same columns, same imputed means).
for (variable in variables) {
  if (nullCounts[[variable]] > 0) {
    avg <- mean(Cases[[variable]], na.rm = TRUE)
    # data.table in-place assignment on the rows where `variable` is NA.
    Cases[is.na(get(variable)), (variable) := avg]
  }
}
# Other imputation
summary(Cases)
Cases[is.na(Female),Female:= ifelse(runif(nullCounts$Female,0,1)<Cases[,mean(Female,na.rm=TRUE)],1L,0L)]
Cases[is.na(Bailiff),Bailiff:= ifelse(runif(nullCounts$Bailiff,0,1)<Cases[,mean(Bailiff,na.rm=TRUE)],1L,0L)]
Cases[is.na(ClosedExecution) & Bailiff==0, ClosedExecution:= 0L]
Cases[is.na(ClosedExecution), ClosedExecution:= ifelse(runif(dim(Cases[is.na(ClosedExecution),])[1],0,1)<Cases[,mean(ClosedExecution,na.rm=TRUE)],1L,0L)]
# Proportion of tail data to be removed from the dataset
summary(Cases)
Proportion = 0.001
Cases <- Cases[LoanAmount<quantile(Cases[,LoanAmount], probs=1-Proportion),]
Cases <- Cases[DPD<quantile(Cases[,DPD], probs=1-Proportion),]
Cases <- Cases[LastPaymentAmount<quantile(Cases[,LastPaymentAmount], probs=1-Proportion),]
# Cecha modelowana regresyjnie za pomocą NN - SR12M
setkey(Cases,CaseId)
setkey(Events,CaseId)
Payments <- Events[Month <= 12,.(P12M = sum(ifelse(is.na(PaymentAmount),0,PaymentAmount)), Qty12M = sum(ifelse(is.na(PaymentAmount),0,1))),by=.(CaseId)]
setkey(Payments,CaseId)
Cases <- Cases[Payments[,.(CaseId,P12M,Qty12M)],nomatch=0][,Client := 'B']
Cases[P12M*1.0/TOA > 0.005 | Qty12M >= 3, Client := 'G']
Cases[, Good:=ifelse(Client=='G',1,0)]
Cases[, SR12M:=P12M*1.0/TOA]
Cases <- Cases[SR12M<quantile(Cases[,SR12M], probs=1-Proportion),]
Cases <- Cases[SR12M >= 0,]
summary(Cases)
# Korelacja
library(corrplot)
corrplot(cor(Cases[,.SD,.SDcols = Variables]), order = "hclust", tl.col='black', tl.cex=.75)
# Włączenie H2O - prosto z dokumentacji modułu DeepLearning H2O
# The following two commands remove any previously installed H2O packages for R.
if ("package:h2o" %in% search()) { detach("package:h2o", unload=TRUE) }
if ("h2o" %in% rownames(installed.packages())) { remove.packages("h2o") }
# Next, we download packages that H2O depends on.
pkgs <- c("RCurl","jsonlite")
for (pkg in pkgs) {
if (! (pkg %in% rownames(installed.packages()))) { install.packages(pkg) }
}
# Now we download, install and initialize the H2O package for R.
install.packages("h2o", type="source", repos="http://h2o-release.s3.amazonaws.com/h2o/rel-yau/10/R")
# Finally, let's load H2O and start up an H2O cluster
library(h2o)
h2o.init(nthreads = -1)
# Wybór cech
Variables <- c( "LoanAmount",
"TOA",
#"Principal",
#"Interest",
#"Other",
"D_ContractDateToImportDate",
"DPD",
"PopulationInCity",
"Age",
"LastPaymentAmount",
"M_LastPaymentToImportDate",
"GDPPerCapita",
"MeanSalary"
#"CreditCard",
#"Female",
#"Bailiff",
#"ClosedExecution"
)
# Zbiór trn/val
n <- Cases[, .N]
indexTrn <- sample(1:n, 0.5*n)
CasesTrn <- Cases[indexTrn,]
CasesTst <- Cases[-indexTrn,]
summary(CasesTrn)
summary(CasesTst)
# Sztuczne portfele w zbiorze Tst (tu dla uproszczenia uzyskane w wyniku analizy skupień - jedno skupienie traktowane jak jeden portfel)
library(cluster)
skupienia = clara(CasesTst[,.SD,.SDcols = Variables], k = 15, metric = "euclidean", stand = FALSE, samples = 5, sampsize = 1000, trace = 0, medoids.x = TRUE, keep.data = FALSE, rngR = TRUE)
#skupienia$clusinfo #Info o skupieniach
#skupienia$medoids #Współrzędne medoidów
#skupienia$i.med #Medoidy
#skupienia$clustering #NumerySkupień
CasesTstP <- copy(CasesTst)
CasesTstP[,skupienie := skupienia$clustering]
# Skupienia jako portfele
CasesTstP[,.N,by=skupienie]
library(scatterplot3d)
library(rgl)
library(car)
CasesStd <- data.table(scale(CasesTstP[,.SD,.SDcols = Variables]))
pca <- prcomp(CasesStd, center = FALSE, scale = FALSE)
CasesPCA <- data.table(as.matrix(CasesStd) %*% pca$rotation)
CasesPCA[, skupienie:=CasesTstP[,skupienie]]
scatter3d(x = CasesPCA[,PC1], y = CasesPCA[,PC2], z = CasesPCA[,PC3], groups = as.factor(CasesPCA[,skupienie]), surface=FALSE)
# Cechy objaśniana i objaśniające na potrzeby NN
y <- 'SR12M'
x <- Variables
# Wczytanie zbiorów jako h2o
train <- as.h2o(CasesTrn)
val <- as.h2o(CasesTst)
valP <- as.h2o(CasesTstP)
# Przeszukiwanie po kracie (Cartesian Grid Search)
# hyperparameters
hidden_opt <- list(c(10,10), c(10))
l1_opt <- c(1e-4,1e-3)
epochs_opt <- c(5,10,30,100)
input_dropout_ratio_opt = c(0.1, 0.2, 0.3, 0.4)
hidden_dropout_ratio_opt = c(0.1, 0.2, 0.3, 0.4)
# lista hiperparametrów
hyper_params <- list(hidden = hidden_opt, l1 = l1_opt, epochs = epochs_opt, hidden_dropout_ratios = hidden_dropout_ratio_opt, input_dropout_ratio = input_dropout_ratio_opt)
#oszacowanie "po kracie"
model_grid <- h2o.grid("deeplearning",
grid_id = "uwr3",
hyper_params = hyper_params,
x = x,
y = y,
distribution = "gaussian",
loss = "Quadratic",
activation = "RectifierWithDropout",
training_frame = train,
validation_frame = val,
#rate = 0.2,
score_interval = 2,
stopping_rounds = 3,
stopping_tolerance = 0.05,
stopping_metric = "MAE")
head(model_grid@summary_table)
tail(model_grid@summary_table)
# Podsumowanie kraty i dodanie informacji o odchyleniu per portfel
# Jest to realizacja podejścia: model per sprawa -> struktura porfeli na zbiorze Val/Tst -> odchylenie per portfel
model_grid_result <- data.table(model_grid@summary_table)
summary <- data.table()
for (i in 1:dim(model_grid_result)[1]) { #i=1
# Predykcja
model <- h2o.getModel(model_grid_result[i,model_ids])
forecast <- h2o.predict(model, newdata = valP)
as.data.frame(forecast$predict)
CasesTstP <- data.table(cbind(CasesTstP, as.data.frame(forecast$predict)))
# Obliczenie średniego odchylenia portfeli
dev <- mean(CasesTstP[,.(dev=(abs(sum(P12M)-sum(predict*TOA)))/sum(P12M)),by=skupienie][,dev])
# Zebranie wyników (residual deviance to MSE per sprawa, dev12M to obliczone wyżej dev, czyli średnie odchylenie SR12M per portfel)
summary <- rbind(summary,data.table(
id=model_grid_result[i,model_ids],
epochs=model_grid_result[i,epochs],
hidden=model_grid_result[i,hidden],
input_dropout_ratio=model_grid_result[i,input_dropout_ratio],
l1=model_grid_result[i,l1],
residual_deviance=as.numeric(model_grid_result[i,residual_deviance]),
dev12M=dev
))
CasesTstP[,predict:=NULL]
}
# Sprawdzenie zależności odchylenia per sprawa vs odchylenie per portfel - widoczna korelacja. Wybieramy hiperparametryzację dającą możliwie najmniejsze oba rodzaje błędu.
plot(summary$residual_deviance,summary$dev12M)
| /ListyZadan/10_ListaRozwiazaniaNN.r | no_license | ArekSobol/Lab2020 | R | false | false | 9,513 | r |
rm(list=ls())
#setwd("")
load("KrukUWr2020.RData")
library(data.table)
set.seed(1)
summary(cases)
summary(events)
Cases <- data.table(cases)
Events <- data.table(events)
# Próbka
# Cases <- Cases[sample((1:dim(Cases)[1]),0.5*dim(Cases)[1])]
# Decoding variables
Cases[,CreditCard := ifelse(Product=="Credit card",1,0)]
Cases[,Female := ifelse(Gender=="FEMALE",1,0)]
# Handling missing data
Variables = c( "LoanAmount",
"TOA",
"Principal",
"Interest",
"Other",
"D_ContractDateToImportDate",
"DPD",
"PopulationInCity",
"Age",
"LastPaymentAmount",
"M_LastPaymentToImportDate",
"GDPPerCapita",
"MeanSalary",
"CreditCard",
"Female",
"Bailiff",
"ClosedExecution"
)
nullCounts <- lapply(Cases[,.SD,.SDcols=Variables], function(x) sum(is.na(x)))
# Imputation with avg
variables <- c( "LoanAmount",
"TOA",
"Principal",
"Interest",
"Other",
"D_ContractDateToImportDate",
"DPD",
"PopulationInCity",
"Age",
"LastPaymentAmount",
"M_LastPaymentToImportDate",
"GDPPerCapita",
"MeanSalary"
)
for (variable in variables) { ## variable = 'Age'
if (eval(parse(text=paste("nullCounts$",variable,sep=""))) > 0) {
avg <- eval(parse(text=paste("mean(Cases[,",variable,"],na.rm=TRUE)",sep="")))
eval(parse(text=paste("Cases[is.na(",variable,"), ",variable,":=avg]",sep="")))
}
}
# Other imputation
summary(Cases)
Cases[is.na(Female),Female:= ifelse(runif(nullCounts$Female,0,1)<Cases[,mean(Female,na.rm=TRUE)],1L,0L)]
Cases[is.na(Bailiff),Bailiff:= ifelse(runif(nullCounts$Bailiff,0,1)<Cases[,mean(Bailiff,na.rm=TRUE)],1L,0L)]
Cases[is.na(ClosedExecution) & Bailiff==0, ClosedExecution:= 0L]
Cases[is.na(ClosedExecution), ClosedExecution:= ifelse(runif(dim(Cases[is.na(ClosedExecution),])[1],0,1)<Cases[,mean(ClosedExecution,na.rm=TRUE)],1L,0L)]
# Proportion of tail data to be removed from the dataset
summary(Cases)
Proportion = 0.001
Cases <- Cases[LoanAmount<quantile(Cases[,LoanAmount], probs=1-Proportion),]
Cases <- Cases[DPD<quantile(Cases[,DPD], probs=1-Proportion),]
Cases <- Cases[LastPaymentAmount<quantile(Cases[,LastPaymentAmount], probs=1-Proportion),]
# Cecha modelowana regresyjnie za pomocą NN - SR12M
setkey(Cases,CaseId)
setkey(Events,CaseId)
Payments <- Events[Month <= 12,.(P12M = sum(ifelse(is.na(PaymentAmount),0,PaymentAmount)), Qty12M = sum(ifelse(is.na(PaymentAmount),0,1))),by=.(CaseId)]
setkey(Payments,CaseId)
Cases <- Cases[Payments[,.(CaseId,P12M,Qty12M)],nomatch=0][,Client := 'B']
Cases[P12M*1.0/TOA > 0.005 | Qty12M >= 3, Client := 'G']
Cases[, Good:=ifelse(Client=='G',1,0)]
Cases[, SR12M:=P12M*1.0/TOA]
Cases <- Cases[SR12M<quantile(Cases[,SR12M], probs=1-Proportion),]
Cases <- Cases[SR12M >= 0,]
summary(Cases)
# Korelacja
library(corrplot)
corrplot(cor(Cases[,.SD,.SDcols = Variables]), order = "hclust", tl.col='black', tl.cex=.75)
# Włączenie H2O - prosto z dokumentacji modułu DeepLearning H2O
# The following two commands remove any previously installed H2O packages for R.
if ("package:h2o" %in% search()) { detach("package:h2o", unload=TRUE) }
if ("h2o" %in% rownames(installed.packages())) { remove.packages("h2o") }
# Next, we download packages that H2O depends on.
pkgs <- c("RCurl","jsonlite")
for (pkg in pkgs) {
if (! (pkg %in% rownames(installed.packages()))) { install.packages(pkg) }
}
# Now we download, install and initialize the H2O package for R.
install.packages("h2o", type="source", repos="http://h2o-release.s3.amazonaws.com/h2o/rel-yau/10/R")
# Finally, let's load H2O and start up an H2O cluster
library(h2o)
h2o.init(nthreads = -1)
# Wybór cech
Variables <- c( "LoanAmount",
"TOA",
#"Principal",
#"Interest",
#"Other",
"D_ContractDateToImportDate",
"DPD",
"PopulationInCity",
"Age",
"LastPaymentAmount",
"M_LastPaymentToImportDate",
"GDPPerCapita",
"MeanSalary"
#"CreditCard",
#"Female",
#"Bailiff",
#"ClosedExecution"
)
# Zbiór trn/val
n <- Cases[, .N]
indexTrn <- sample(1:n, 0.5*n)
CasesTrn <- Cases[indexTrn,]
CasesTst <- Cases[-indexTrn,]
summary(CasesTrn)
summary(CasesTst)
# Sztuczne portfele w zbiorze Tst (tu dla uproszczenia uzyskane w wyniku analizy skupień - jedno skupienie traktowane jak jeden portfel)
library(cluster)
skupienia = clara(CasesTst[,.SD,.SDcols = Variables], k = 15, metric = "euclidean", stand = FALSE, samples = 5, sampsize = 1000, trace = 0, medoids.x = TRUE, keep.data = FALSE, rngR = TRUE)
#skupienia$clusinfo #Info o skupieniach
#skupienia$medoids #Współrzędne medoidów
#skupienia$i.med #Medoidy
#skupienia$clustering #NumerySkupień
CasesTstP <- copy(CasesTst)
CasesTstP[,skupienie := skupienia$clustering]
# Skupienia jako portfele
CasesTstP[,.N,by=skupienie]
library(scatterplot3d)
library(rgl)
library(car)
CasesStd <- data.table(scale(CasesTstP[,.SD,.SDcols = Variables]))
pca <- prcomp(CasesStd, center = FALSE, scale = FALSE)
CasesPCA <- data.table(as.matrix(CasesStd) %*% pca$rotation)
CasesPCA[, skupienie:=CasesTstP[,skupienie]]
scatter3d(x = CasesPCA[,PC1], y = CasesPCA[,PC2], z = CasesPCA[,PC3], groups = as.factor(CasesPCA[,skupienie]), surface=FALSE)
# Cechy objaśniana i objaśniające na potrzeby NN
y <- 'SR12M'
x <- Variables
# Wczytanie zbiorów jako h2o
train <- as.h2o(CasesTrn)
val <- as.h2o(CasesTst)
valP <- as.h2o(CasesTstP)
# Przeszukiwanie po kracie (Cartesian Grid Search)
# hyperparameters
hidden_opt <- list(c(10,10), c(10))
l1_opt <- c(1e-4,1e-3)
epochs_opt <- c(5,10,30,100)
input_dropout_ratio_opt = c(0.1, 0.2, 0.3, 0.4)
hidden_dropout_ratio_opt = c(0.1, 0.2, 0.3, 0.4)
# lista hiperparametrów
hyper_params <- list(hidden = hidden_opt, l1 = l1_opt, epochs = epochs_opt, hidden_dropout_ratios = hidden_dropout_ratio_opt, input_dropout_ratio = input_dropout_ratio_opt)
#oszacowanie "po kracie"
model_grid <- h2o.grid("deeplearning",
grid_id = "uwr3",
hyper_params = hyper_params,
x = x,
y = y,
distribution = "gaussian",
loss = "Quadratic",
activation = "RectifierWithDropout",
training_frame = train,
validation_frame = val,
#rate = 0.2,
score_interval = 2,
stopping_rounds = 3,
stopping_tolerance = 0.05,
stopping_metric = "MAE")
head(model_grid@summary_table)
tail(model_grid@summary_table)
# Podsumowanie kraty i dodanie informacji o odchyleniu per portfel
# Jest to realizacja podejścia: model per sprawa -> struktura porfeli na zbiorze Val/Tst -> odchylenie per portfel
model_grid_result <- data.table(model_grid@summary_table)
summary <- data.table()
for (i in 1:dim(model_grid_result)[1]) { #i=1
# Predykcja
model <- h2o.getModel(model_grid_result[i,model_ids])
forecast <- h2o.predict(model, newdata = valP)
as.data.frame(forecast$predict)
CasesTstP <- data.table(cbind(CasesTstP, as.data.frame(forecast$predict)))
# Obliczenie średniego odchylenia portfeli
dev <- mean(CasesTstP[,.(dev=(abs(sum(P12M)-sum(predict*TOA)))/sum(P12M)),by=skupienie][,dev])
# Zebranie wyników (residual deviance to MSE per sprawa, dev12M to obliczone wyżej dev, czyli średnie odchylenie SR12M per portfel)
summary <- rbind(summary,data.table(
id=model_grid_result[i,model_ids],
epochs=model_grid_result[i,epochs],
hidden=model_grid_result[i,hidden],
input_dropout_ratio=model_grid_result[i,input_dropout_ratio],
l1=model_grid_result[i,l1],
residual_deviance=as.numeric(model_grid_result[i,residual_deviance]),
dev12M=dev
))
CasesTstP[,predict:=NULL]
}
# Sprawdzenie zależności odchylenia per sprawa vs odchylenie per portfel - widoczna korelacja. Wybieramy hiperparametryzację dającą możliwie najmniejsze oba rodzaje błędu.
plot(summary$residual_deviance,summary$dev12M)
|
#' Decompose an edge list
#'
#' Generates two data frames (nodes and edges) from a list of edges
#'
#' `edge.list` transforms the input into a two-elements list containing a
#' dataframe of nodes (with columns \dQuote{id} and \dQuote{label}) and a
#' dataframe of edges. The last one is numeric (with columns \dQuote{source}
#' and \dQuote{target}) and based on autogenerated nodes' ids.
#'
#' @param x A matrix or data frame structured as a list of edges
#' @return A list containing two data frames.
#' @author George Vega Yon
#'
#' Jorge Fabrega Lacoa
#' @keywords manip
#' @examples
#'
#' edgelist <- matrix(
#' c("matthew","john",
#' "max","stephen",
#' "matthew","stephen"),
#' byrow=TRUE, ncol=2)
#'
#' edge.list(edgelist)
#'
#' @export
edge.list <- function(x) {
  # Convert a two-column edge list (matrix or data.frame) into a list with:
  #   nodes -- data.frame(id, label), one row per distinct vertex
  #   edges -- integer matrix(source, target) using the generated node ids
  objClass <- class(x)
  k <- ncol(x)
  if (!any(c("matrix", "data.frame") %in% objClass))
    stop("-", objClass,
         "- class not supported, try with a \"matrix\" or a \"data.frame\"")
  if (k != 2) stop("Insufficient number of columns (", k, ")")
  # Pool both endpoint columns into one factor so every vertex gets a stable
  # integer code; codes become the edge endpoints, levels become the labels.
  if (!is.factor(x)) x <- factor(c(x[, 1], x[, 2]))
  edges <- matrix(unclass(x), byrow = FALSE, ncol = 2)
  colnames(edges) <- c("source", "target")
  nodes <- data.frame(id = 1:nlevels(x), label = levels(x),
                      stringsAsFactors = FALSE)
  list(nodes = nodes, edges = edges)
}
.defAtt <- function(x, parent) {
################################################################################
# Prints the nodes and edges att definition
################################################################################
  # For every row of `x` (one attribute definition per row), append an
  # <attribute .../> child to the XML node `parent`, using the row's named
  # values as XML attributes. Relies on apply() coercing `x` to a character
  # matrix, so each row arrives as a named character vector.
  apply(x, MARGIN=1,
        function(x, PAR) {
          XML::newXMLNode(name="attribute", parent=PAR, attrs=x)
        }, PAR=parent)
}
.addAtts <- function(tmpatt, attvec, tmpdoc=NULL) {
  # Build the <attvalues> XML fragment for a single node/edge.
  #
  # tmpatt : list/row of user attribute values (att1, att2, ...).
  # attvec : integer indices of the attributes to emit.
  # tmpdoc : optional character vector of lines to prepend (kept for
  #          backward compatibility with existing callers).
  #
  # Returns one string: "<attvalues><attvalue .../>...</attvalues>".
  tmpatt <- data.frame(
    "for" = paste("att", attvec, sep = ""),
    value = unlist(tmpatt, recursive = FALSE), check.names = FALSE
  )
  for (i in attvec)
    # BUGFIX: the original passed `sep=""` to c(), which silently appended a
    # spurious empty element (named "sep") on every iteration; it was only
    # harmless because the final paste() collapses with "".
    tmpdoc <- c(tmpdoc, .writeXMLLine("attvalue", tmpatt[i, , drop = FALSE]))
  paste(c("<attvalues>", tmpdoc, "</attvalues>"), sep = "", collapse = "")
}
.writeXMLLine <- function(type, obj, finalizer=TRUE) {
  # Serialize a one-row data.frame `obj` as a single XML tag named `type`,
  # using the non-NA columns as attributes, e.g.
  #   .writeXMLLine("node", data.frame(id=1, label="a"))
  #   -> "<node id=\"1\" label=\"a\"/>"
  #
  # finalizer : TRUE closes the tag in place ("/>"); FALSE leaves it open
  #             (">") so the caller can append children.
  keep <- !is.na(obj)
  # IDIOM FIX: `finalizer` is a scalar, so a plain if/else replaces the
  # original scalar misuse of ifelse(); behavior is identical.
  closing <- if (finalizer) "\"/>" else "\">"
  paste("<", type, " ",
        paste(colnames(obj)[keep], obj[keep], sep = "=\"", collapse = "\" "),
        closing, sep = "")
}
.addNodesEdges <- function(dataset, PAR, type="node", doc) {
################################################################################
# Prints the nodes and edges
################################################################################
  # Serializes every row of `dataset` as a <node> or <edge> XML element and
  # appends it under the XML node `PAR`.
  #
  # dataset : data.frame, one row per node/edge. Columns named "att<digit>"
  #           are user attributes, columns prefixed "viz." are visual
  #           attributes; everything else becomes a plain XML attribute.
  # PAR     : parent XML node the elements are attached to.
  # type    : element name, "node" or "edge".
  # doc     : unused in this body -- TODO(review): confirm whether callers
  #           still pass it and whether it can be dropped.
  n <- NROW(dataset)
  vec <- 1:n
  xvars <- colnames(dataset)
  # Columns that are neither user attributes nor viz attributes.
  noattnames <- xvars[grep("(^att[0-9])|(^viz[.])", xvars, invert=T)]
  datasetnoatt <- dataset[, noattnames, drop=FALSE]
  # Parsing user-define attributes
  # NOTE: `attributes <- ... > 0` assigns the logical flag and tests it in
  # one expression (locally shadowing base::attributes).
  if (attributes <- length(grep("^att", xvars)) > 0) {
    attnames <- colnames(dataset)[grep("^att", xvars)]
    att <- dataset[,attnames, drop=FALSE]
    attvec <- 1:length(attnames)
  }
  # Parsing VIZ attributes
  if ((vizattributes <- length(grep("^viz[.]", xvars)) > 0)) {
    vizattnames <- colnames(dataset)[grep("^viz[.]", xvars)]
    # Color atts
    # NOTE(review): unlike the branches below, this subset has no
    # drop=FALSE, so a single viz.color.* column would collapse to a
    # vector and break colnames<- -- confirm whether that can occur.
    if ((vizcolors <- any(grepl("^viz[.]color",vizattnames)))) {
      vizcol.df <- dataset[,grep("^viz[.]color[.]", vizattnames, value=TRUE)]
      colnames(vizcol.df) <- gsub("^viz[.]color[.]", "", colnames(vizcol.df))
    }
    # Pos att
    if ((vizposition <- any(grepl("^viz[.]position",vizattnames)))) {
      vizpos.df <- dataset[,grep("^viz[.]position[.]", vizattnames, value=TRUE), drop=FALSE]
      colnames(vizpos.df) <- gsub("^viz[.]position[.]", "", colnames(vizpos.df))
    }
    # Size att
    if ((vizsize <- any(grepl("^viz[.]size",vizattnames)))) {
      vizsiz.df <- dataset[,grep("^viz[.]size[.]", vizattnames, value=TRUE), drop=FALSE]
      colnames(vizsiz.df) <- gsub("^viz[.]size[.]", "", colnames(vizsiz.df))
    }
    # Shape att
    if ((vizshape <- any(grepl("^viz[.]shape",vizattnames)))) {
      vizshp.df <- dataset[,grep("^viz[.]shape[.]", vizattnames, value=TRUE), drop=FALSE]
      colnames(vizshp.df) <- gsub("^viz[.]shape[.]", "", colnames(vizshp.df))
    }
    # Image att
    if ((vizimage <- any(grepl("^viz[.]image",vizattnames)))) {
      vizimg.df <- dataset[,grep("^viz[.]image[.]", vizattnames, value=TRUE), drop=FALSE]
      colnames(vizimg.df) <- c("value", "uri")
    }
    # Thickness att
    # NOTE(review): this matches "^viz[.]size" again, so `viztness` mirrors
    # `vizsize` -- looks like a copy-paste remnant (thickness?). Neither
    # `viztness` nor `vizthk.df` is used later in this function.
    if ((viztness <- any(grepl("^viz[.]size",vizattnames)))) {
      vizthk.df <- dataset[,grep("^viz[.]size[.]", vizattnames, value=TRUE), drop=FALSE]
      colnames(vizthk.df) <- gsub("^viz[.]size[.]", "", colnames(vizthk.df))
    }
  }
  # Free memory
  rm(dataset)
  # Loop if there are not any attributes
  # Fast path: plain elements only, closed in place by .writeXMLLine().
  if (!attributes && !vizattributes) {
    for (i in vec) {
      XML::parseXMLAndAdd(.writeXMLLine(type, datasetnoatt[i,,drop=FALSE]),parent=PAR)
    }
    return(NULL)
  }
  # Loop if only there are attributes
  # Element opened with finalizer=FALSE so <attvalues> can nest inside.
  if (attributes && !vizattributes) {
    for (i in vec) {
      # Adding directly
      XML::parseXMLAndAdd(
        paste(.writeXMLLine(type, datasetnoatt[i,,drop=FALSE], finalizer=FALSE),
              .addAtts(att[i,], attvec), # Builds atts definition
              "</",type,">",sep=""),
        parent=PAR)
    }
    return(NULL)
  }
  # Loop if there are attributes and viz attributes
  for (i in vec) {
    # Node/Edge + Atts
    if (attributes) {
      tempnode0 <- paste(
        .writeXMLLine(type, datasetnoatt[i,,drop=FALSE], finalizer=FALSE),
        .addAtts(att[i,], attvec), sep="")
    }
    else tempnode0 <- .writeXMLLine(type, datasetnoatt[i,,drop=FALSE], finalizer=FALSE)
    # Viz Att printing
    # Colors
    if (vizcolors) {
      tempnode0 <- paste(tempnode0, .writeXMLLine("color", vizcol.df[i,,drop=FALSE]),
                         sep="")
    }
    # Position
    if (vizposition) {
      tempnode0 <- paste(tempnode0, .writeXMLLine("position", vizpos.df[i,,drop=FALSE]),
                         sep="")
    }
    # Size
    # Only the first size column is emitted (vizsiz.df[i, 1]).
    if (vizsize) {
      tempnode0 <- paste(tempnode0, .writeXMLLine("size", vizsiz.df[i,1, drop=FALSE]),
                         sep="")
    }
    # Shape
    if (vizshape) {
      tempnode0 <- paste(tempnode0, .writeXMLLine("shape", vizshp.df[i,1,drop=FALSE]),
                         sep="")
    }
    # Image
    # NOTE(review): this emits a <shape> tag for the image attributes;
    # likely intended to be "image" -- confirm against the GEXF viz spec
    # before changing.
    if (vizimage) {
      tempnode0 <- paste(tempnode0, .writeXMLLine("shape", vizimg.df[i,,drop=FALSE]),
                         sep="")
    }
    XML::parseXMLAndAdd(sprintf("%s</%s>",tempnode0, type), parent=PAR)
  }
  return(NULL)
}
#' Creates an object of class `gexf`
#'
#' Takes a `node` matrix (or dataframe) and an
#' `edge` matrix (or dataframe) and creates a `gexf` object
#' containing a data-frame representation and a gexf representation of a graph.
#'
#' @details
#' Just like `nodesVizAtt` and `edgesVizAtt`, `nodesAtt` and
#' `edgesAtt` must have the same number of rows as nodes and edges,
#' respectively. Using data frames is necessary as in this way data types are
#' preserved.
#'
#' `nodesVizAtt` and `edgesVizAtt` allow using visual attributes such
#' as color, position (nodes only), size (nodes only), thickness (edges only)
#' shape and image (nodes only). \itemize{ \item Color is defined by the RGBA
#' color model, thus for every node/edge the color should be specified through
#' a data-frame with columns *r* (red), *g* (green), *b* (blue)
#' with integers between 0 and 256 and a last column with *alpha* values
#' as a float between 0.0 and 1.0. \item Position, for every node, it is a
#' three-column data-frame including *x*, *y* and *z*
#' coordinates. The three components must be float. \item Size as a numeric
#' colvector (float values). \item Thickness (see size). \item Node Shape
#' (string), currently unsupported by Gephi, can take the values of
#' *disk*, *square*, *triangle*, *diamond* and
#' *image*. \item Edge Shape (string), currently unsupported by Gephi, can
#' take the values of *solid*, *dotted*, *dashed* and
#' *double*. \item Image (string), currently unsupported by Gephi,
#' consists on a vector of strings representing URIs. }
#'
#' `nodeDynamic` and `edgeDynamic` allow to draw dynamic graphs. It
#' should contain two columns *start* and *end*, both allowing
#' `NA` value. It can be use jointly with `tFormat` which by default
#' is set as \dQuote{double}. Currently accepted time formats are: \itemize{
#' \item Integer or double. \item International standard *date*
#' yyyy-mm-dd. \item dateTime W3 XSD
#' (http://www.w3.org/TR/xmlschema-2/#dateTime). }
#'
#' @param nodes A two-column data-frame or matrix of \dQuote{id}s and
#' \dQuote{label}s representing nodes.
#' @param edges A two-column data-frame or matrix containing \dQuote{source}
#' and \dQuote{target} for each edge. Source and target values are based on the
#' nodes ids.
#' @param edgesId A one-column data-frame, matrix or vector.
#' @param edgesLabel A one-column data-frame, matrix or vector.
#' @param edgesAtt A data-frame with one or more columns representing edges'
#' attributes.
#' @param edgesWeight A numeric vector containing edges' weights.
#' @param edgesVizAtt List of three or less viz attributes such as color, size
#' (thickness) and shape of the edges (see details)
#' @param nodesAtt A data-frame with one or more columns representing nodes'
#' attributes
#' @param nodesVizAtt List of four or less viz attributes such as color,
#' position, size and shape of the nodes (see details)
#' @param nodeDynamic A two-column matrix or data-frame. The first column
#' indicates the time at which a given node starts; the second one shows when
#' it ends. The matrix or data-frame must have the same number of rows as the
#' number of nodes in the graph.
#' @param edgeDynamic A two-column matrix or data-frame. The first column
#' indicates the time at which a given edge starts; the second one shows when it
#' ends. The matrix or data-frame must have the same number of rows as the
#' number of edges in the graph.
#' @param digits Integer. Number of decimals to keep for nodes/edges sizes. See
#' [print.default()]
#' @param output String. The complete path (including filename) where to export
#' the graph as a GEXF file.
#' @param tFormat String. Time format for dynamic graphs (see details)
#' @param defaultedgetype \dQuote{directed}, \dQuote{undirected},
#' \dQuote{mutual}
#' @param meta A List. Meta data describing the graph
#' @param keepFactors Logical, whether to handle factors as numeric values
#' (`TRUE`) or as strings (`FALSE`) by using `as.character`.
#' @param encoding Encoding of the graph.
#' @param vers Character scalar. Version of the GEXF format to generate.
#' By default `"1.3"`.
#' @param ... Passed to `gexf`.
#' @param rescale.node.size Logical scalar. When `TRUE` it rescales the
#' size of the vertices such that the largest one is about \%5 of the plotting
#' region.
#' @return A `gexf` class object (list). Contains the following: \itemize{
#' \item `meta` : (list) Meta data describing the graph. \item
#' `mode` : (list) Sets the default edge type and the graph mode. \item
#' `atts.definitions`: (list) Two data-frames describing nodes and edges
#' attributes. \item `nodesVizAtt` : (data-frame) A multi-column
#' data-frame with the nodes' visual attributes. \item `edgesVizAtt` :
#' (data-frame) A multi-column data-frame with the edges' visual attributes.
#' \item `nodes` : (data-frame) A two-column data-frame with nodes' ids
#' and labels. \item `edges` : (data-frame) A five-column data-frame with
#' edges' ids, labels, sources, targets and weights. \item `graph` :
#' (String) GEXF (XML) representation of the graph. }
#' @author George Vega Yon
#'
#' Jorge Fabrega Lacoa
#' @seealso [new.gexf.graph()]
#' @references The GEXF project website: http://gexf.net
#' @keywords IO
#' @examples
#'
#' \dontrun{
#' demo(gexf) # Example of gexf command using fictional data.
#' demo(gexfattributes) # Working with attributes.
#' demo(gexfbasic) # Basic net.
#' demo(gexfdynamic) # Dynamic net.
#' demo(edge.list) # Working with edges lists.
#' demo(gexffull) # All the package.
#' demo(gexftwitter) # Example with real data of chilean twitter accounts.
#' demo(gexfdynamicandatt) # Dynamic net with static attributes.
#' demo(gexfbuildfromscratch) # Example building a net from scratch.
#' }
#'
#' @name gexf-class
NULL
# Per-attribute factories supplying fallback node viz attributes. Each entry
# is a zero-argument function invoked once per node by set_default_nodeVizAtt():
#   size     -> constant 8.0
#   position -> random (x, y) in [-200, 200], z fixed at 0
#   color    -> "tomato"
default_nodeVizAtt <- list(
  size     = function() 8.0,
  position = function() structure(c(runif(2, -200, 200), 0), names = c("x", "y", "z")),
  color    = function() "tomato"
)
# Fill in missing node viz attributes of the CALLER's frame, by side effect:
# reads `nodes` and mutates `nodesVizAtt` in parent.frame(). Intended to be
# called from inside gexf() only; it has no meaningful behavior elsewhere.
set_default_nodeVizAtt <- function() {
  # Getting parameters
  env <- parent.frame()
  n <- nrow(env[["nodes"]])
  # Checking viz attributes
  # For each attribute the user left empty, build an n-row matrix by calling
  # the corresponding factory in default_nodeVizAtt once per node.
  for (att in names(default_nodeVizAtt))
    if (!length(env[["nodesVizAtt"]][[att]])) {
      env[["nodesVizAtt"]][[att]] <- do.call(
        rbind,
        lapply(1L:n, function(i) default_nodeVizAtt[[att]]())
      )
    }
}
#' @export
#' @rdname gexf-class
gexf <- function(
  ##############################################################################
  # Builds a -gexf- object from nodes/edges data and, optionally, writes the
  # XML representation to -output-.
  ##############################################################################
  nodes,
  edges,
  edgesLabel=NULL,
  edgesId=NULL,
  edgesAtt=NULL,
  edgesWeight=NULL,
  edgesVizAtt = list(color=NULL, size=NULL, shape=NULL),
  nodesAtt=NULL,
  nodesVizAtt = list(color=NULL, position=NULL, size=NULL, shape=NULL, image=NULL),
  nodeDynamic=NULL,
  edgeDynamic=NULL,
  digits=getOption("digits"),
  output = NA,
  tFormat="double",
  defaultedgetype = "undirected",
  meta = list(
    creator = "NodosChile",
    description = "A GEXF file written in R with \"rgexf\"",
    keywords = "GEXF, NodosChile, R, rgexf, Gephi"
  ),
  keepFactors = FALSE,
  encoding = "UTF-8",
  vers = "1.3",
  rescale.node.size = TRUE
) {
  ##############################################################################
  # CLASS CHECKS AND OTHER CHECKS
  # Nodes: must be a two-column (id, label) data.frame or matrix.
  if (inherits(nodes, c("data.frame", "matrix"))) {
    if (ncol(nodes) != 2)
      stop("-nodes- should have two columns not ", ncol(nodes))
  }
  else
    stop("Invalid object type: -nodes- should be a two column data.frame or a matrix")
  # Edges: must be a two-column (source, target) data.frame or matrix.
  if (inherits(edges, c("data.frame", "matrix"))) {
    if (ncol(edges) != 2)
      stop("-edges- should have two columns not ", ncol(edges))
  }
  else
    stop("Invalid object type: -edges- should be a two column data.frame or a matrix")
  # version
  vers <- gexf_version(vers)
  n <- nrow(nodes)
  m <- nrow(edges)
  # Edges Label
  .parseEdgesLabel(edgesLabel, edges)
  # Parsing Edges Id
  edgesId <- .parseEdgesId(edgesId, edges)
  # Parsing Edges Att
  nEdgesAtt <- .parseEdgesAtt(edgesAtt, edges)
  # Parsing edges Weight
  .parseEdgesWeight(edgesWeight, edges)
  # Parsing edges Viz Att
  edgesVizAtt <- if (length(unlist(edgesVizAtt))) {
    # BUG FIX: the empty components must be dropped from -edgesVizAtt- itself.
    # This line used to filter -nodesVizAtt- with the edge components' lengths,
    # silently clobbering the user's node viz attributes whenever edge viz
    # attributes were supplied.
    edgesVizAtt <- edgesVizAtt[sapply(edgesVizAtt, length) > 0]
    Map(function(a, b) parseVizAtt(a, b, m, "edges"), a=names(edgesVizAtt),
        b=edgesVizAtt)
  } else NULL
  nEdgesVizAtt <- length(edgesVizAtt)
  # Nodes Att
  nNodesAtt <- .parseNodesAtt(nodesAtt, nodes)
  # Fill missing node viz attributes (size/position/color) with defaults;
  # note this mutates -nodesVizAtt- in THIS frame via parent.frame().
  set_default_nodeVizAtt()
  # Rescaling vertex sizes if required: largest node becomes ~1% of the
  # widest coordinate range.
  if (rescale.node.size) {
    sscale <- apply(nodesVizAtt$position, 2, range)
    sscale <- max(sscale[2 ,] - sscale[1 ,])*.01
    nodesVizAtt$size <- nodesVizAtt$size/max(nodesVizAtt$size)*sscale
  }
  nodesVizAtt <- if (length(unlist(nodesVizAtt))) {
    # Removing empty components before parsing
    nodesVizAtt <- nodesVizAtt[sapply(nodesVizAtt, length) > 0]
    Map(function(a, b) parseVizAtt(a, b, n, "nodes"), a=names(nodesVizAtt),
        b=nodesVizAtt)
  } else NULL
  nNodesVizAtt <- length(nodesVizAtt)
  # Checking the number of digits. Accepts any single numeric in [0, 22];
  # the old is.integer() test rejected plain doubles (e.g. digits = 4)
  # even though the error message promises "a number between 0 and 22".
  if (!is.numeric(digits) || length(digits) != 1L || is.na(digits) ||
      digits < 0 || digits > 22)
    stop("Invalid number of digits ", digits,
         ".\n Must be a number between 0 and 22")
  fmt <- sprintf("%%.%gg", digits)
  # Dynamics: dynamic[1] = dynamic nodes, dynamic[2] = dynamic edges.
  dynamic <- c(FALSE, FALSE)
  if (length(nodeDynamic) > 0) {
    if (is.data.frame(nodeDynamic) | is.matrix(nodeDynamic)) {
      if (NROW(nodeDynamic) == NROW(nodes)) dynamic[1] <- TRUE
      else stop("Insufficient number of rows: -nodeDynamic- (",NROW(nodeDynamic),
                " rows) should have the same number of rows than nodes there are (",
                NROW(nodes),")")
    } else stop("Invalid object type: -nodeDynamic- should be a two columns data.frame or a matrix")
  }
  if (length(edgeDynamic) > 0) {
    if (is.data.frame(edgeDynamic) | is.matrix(edgeDynamic)) {
      if (NROW(edgeDynamic) == NROW(edges)) dynamic[2] <- TRUE
      else stop("Insufficient number of rows: -edgeDynamic- (",NROW(edgeDynamic), " rows) should have the same number of rows than edges there are (", NROW(edges),")")
    } else stop("Invalid object type: -edgeDynamic- should be a two columns data.frame or a matrix")
  }
  ##############################################################################
  # Strings: keep character columns as characters regardless of R defaults;
  # restored on exit.
  old.strAF <- getOption("stringsAsFactors")
  on.exit(options(stringsAsFactors = old.strAF))
  options(stringsAsFactors = FALSE)
  if (!any(dynamic)) mode <- "static" else mode <- "dynamic"
  # Starting xml (note: local -gexf- shadows the function name on purpose)
  xmlFile <- XML::newXMLDoc(addFinalizer=TRUE)
  gexf <- XML::newXMLNode(name="gexf", doc = xmlFile)
  # gexf namespaces/attributes
  XML::newXMLNamespace(node=gexf, namespace=vers$xmlns)
  XML::newXMLNamespace(
    node=gexf, namespace=vers$`xmlns:vis`, prefix="viz")
  XML::newXMLNamespace(
    node=gexf, namespace="http://www.w3.org/2001/XMLSchema-instance",
    prefix="xsi"
  )
  XML::xmlAttrs(gexf) <- c(
    "xsi:schemaLocation" = vers$`xsi:schemaLocation`, version=vers$number)
  # graph metadata
  xmlMeta <- XML::newXMLNode(name="meta",
                             attrs=list(lastmodifieddate=as.character(Sys.Date())),
                             parent=gexf)
  XML::newXMLNode(name="creator", meta$creator, parent=xmlMeta)
  XML::newXMLNode(name="description", meta$description, parent=xmlMeta)
  XML::newXMLNode(name="keywords", meta$keywords, parent=xmlMeta)
  xmlGraph <- XML::newXMLNode(name="graph", parent=gexf)
  if (mode == "dynamic") {
    # Coerce the start/end columns to a common representation (numeric when
    # keepFactors, character otherwise) so min/max below are well defined.
    if (keepFactors) {
      for(i in 1:2) {
        if (dynamic[1])
          nodeDynamic[,i] <- as.numeric(nodeDynamic[,i])
        if (dynamic[2])
          edgeDynamic[,i] <- as.numeric(edgeDynamic[,i])
      }
    } else {
      for(i in 1:2) {
        if (dynamic[1])
          nodeDynamic[,i] <- as.character(nodeDynamic[,i])
        if (dynamic[2])
          edgeDynamic[,i] <- as.character(edgeDynamic[,i])
      }
    }
    strTime <- c(unlist(nodeDynamic),unlist(edgeDynamic))
    endTime <- strTime
    # Graph-wide time span = range over all node/edge spells
    strTime <- min(strTime, na.rm=TRUE)
    endTime <- max(endTime, na.rm=TRUE)
    XML::xmlAttrs(xmlGraph) <- c(mode=mode, start=strTime, end=endTime,
                                 timeformat=tFormat, defaultedgetype=defaultedgetype)
    # Open-ended spells (NA) default to the graph's full time range
    if (dynamic[1]) {
      nodeDynamic[is.na(nodeDynamic[,1]),1] <- strTime
      nodeDynamic[is.na(nodeDynamic[,2]),2] <- endTime
    }
    if (dynamic[2]) {
      edgeDynamic[is.na(edgeDynamic[,1]),1] <- strTime
      edgeDynamic[is.na(edgeDynamic[,2]),2] <- endTime
    }
  } else
    XML::xmlAttrs(xmlGraph) <- c(mode=mode, defaultedgetype=defaultedgetype)
  # R typeof() -> GEXF attribute datatype translation table
  datatypes <- matrix(
    c(
      "string", "character",
      "integer", "integer",
      "float", "double",
      "boolean", "logical"
    ), byrow=TRUE, ncol =2)
  # nodes att definitions
  if (nNodesAtt > 0) {
    TIT <- colnames(nodesAtt)
    TYPE <- unlist(lapply(nodesAtt, typeof))
    CLASS <- unlist(lapply(nodesAtt, class))
    # Checks for factors (factor replacing is done later)
    if (keepFactors) TYPE[CLASS == "factor"] <- "integer"
    else TYPE[CLASS == "factor"] <- "string"
    nodesAttDf <- data.frame(
      id = paste("att",1:nNodesAtt,sep=""),
      title = TIT,
      type = TYPE
    )
    # Fixing datatype
    for (i in 1:NROW(datatypes)) {
      nodesAttDf$type <- gsub(datatypes[i,2], datatypes[i,1], nodesAttDf$type)
    }
    xmlAttNodes <- XML::newXMLNode(name="attributes", parent=xmlGraph)
    XML::xmlAttrs(xmlAttNodes) <- c(class="node", mode="static")
    .defAtt(nodesAttDf, parent=xmlAttNodes)
  }
  else {
    nodesAttDf <- NULL
  }
  # edges att
  if (nEdgesAtt > 0) {
    TIT <- colnames(edgesAtt)
    TYPE <- unlist(lapply(edgesAtt, typeof))
    CLASS <- unlist(lapply(edgesAtt, class))
    # Checks for factors (factor replacing is done later)
    if (keepFactors) TYPE[CLASS == "factor"] <- "integer"
    else TYPE[CLASS == "factor"] <- "string"
    edgesAttDf <- data.frame(
      id = paste("att",1:nEdgesAtt,sep=""),
      title = TIT,
      type = TYPE
    )
    # Fixing datatype
    for (i in 1:NROW(datatypes)) {
      edgesAttDf$type <- gsub(datatypes[i,2], datatypes[i,1], edgesAttDf$type)
    }
    xmlAttEdges <- XML::newXMLNode(name="attributes", parent=xmlGraph)
    XML::xmlAttrs(xmlAttEdges) <- c(class="edge", mode="static")
    .defAtt(edgesAttDf, parent=xmlAttEdges)
  }
  else {
    edgesAttDf <- NULL
  }
  # nodes vizatt ---------------------------------------------------------------
  ListNodesVizAtt <- if (nNodesVizAtt > 0)
    do.call(cbind, unname(nodesVizAtt))
  else
    NULL
  # Strip the "viz.<att>." prefix so the per-attribute data frames carry
  # bare column names (r/g/b/a, x/y/z, ...)
  nodesVizAtt <- lapply(nodesVizAtt, function(x) {
    colnames(x) <- gsub("^viz[.][a-zA-Z]+[.]", "", colnames(x))
    x
  })
  # edges vizatt ---------------------------------------------------------------
  ListEdgesVizAtt <- if (nEdgesVizAtt >0)
    do.call(cbind, unname(edgesVizAtt))
  else
    NULL
  edgesVizAtt <- lapply(edgesVizAtt, function(x) {
    colnames(x) <- gsub("^viz[.][a-zA-Z]+[.]", "", colnames(x))
    x
  })
  ##############################################################################
  # The basic char matrix definition for nodes
  if (dynamic[1])
    nodeDynamic <- as.data.frame(nodeDynamic)
  if (nNodesAtt > 0)
    nodesAtt <- data.frame(nodesAtt)
  for (set in c(nodeDynamic, nodesAtt, ListNodesVizAtt)) {
    try(nodes <- data.frame(nodes, set), silent=TRUE)
  }
  # Naming the columns
  attNames <- nodesAttDf["id"]
  if (!is.null(nodeDynamic))
    tmeNames <- c("start", "end")
  else
    tmeNames <- NULL
  colnames(nodes) <- unlist(
    c("id", "label", tmeNames, attNames, colnames(ListNodesVizAtt))
  )
  # Fixing factors
  if (keepFactors) {
    tofix <- which(lapply(nodes, class) %in% "factor")
    if (length(tofix)) {
      warning("Factor variables will be stored as -numeric-.",
              "\nIf you don't want this behavior, set -keepFactors- as -FALSE-.")
      nodes[,tofix] <- lapply(nodes[,tofix,drop=FALSE], as.numeric)
    }
  } else {
    tofix <- which(lapply(nodes, class) %in% "factor")
    if (length(tofix))
      nodes[,tofix] <- lapply(nodes[,tofix,drop=FALSE], as.character)
  }
  # NODES
  xmlNodes <- XML::newXMLNode(name="nodes", parent=xmlGraph)
  .addNodesEdges(nodes, xmlNodes, "node")
  ##############################################################################
  # The basic dataframe definition for edges
  if (dynamic[2]) edgeDynamic <- as.data.frame(edgeDynamic)
  if (nEdgesAtt > 0) edgesAtt <- data.frame(edgesAtt)
  # Adding edge id (and label, when present)
  try(edgesId <- cbind(edgesId, edgesLabel), silent=TRUE)
  edges <- cbind(edgesId, edges)
  for (set in c(edgeDynamic, edgesAtt, ListEdgesVizAtt)) {
    try(edges <- data.frame(edges, set), silent=TRUE)
  }
  # Naming the columns
  attNames <- edgesAttDf["id"]
  if (!is.null(edgeDynamic))
    tmeNames <- c("start", "end")
  else
    tmeNames <- NULL
  # Generating weights (default weight is 1)
  if (!length(edgesWeight)) edgesWeight <- 1
  edges <- data.frame(edges, x=as.numeric(edgesWeight))
  edges$x <- sprintf(fmt, edges$x)
  # Setting colnames
  if (length(edgesLabel) > 0) edgesLabelCName <- "label"
  else edgesLabelCName <- NULL
  colnames(edges) <- unlist(c("id", edgesLabelCName, "source", "target",
                              tmeNames, attNames, colnames(ListEdgesVizAtt),
                              "weight"))
  # EDGES
  xmlEdges <- XML::newXMLNode(name="edges", parent=xmlGraph)
  # Fixing factors
  if (keepFactors) {
    for (i in colnames(edges)) {
      if (class(edges[[i]]) == "factor")
        edges[[i]] <- as.numeric(edges[[i]])
    }
  } else {
    for (i in colnames(edges)) {
      if (class(edges[[i]]) == "factor")
        edges[[i]] <- as.character(edges[[i]])
    }
  }
  # Adding edges
  .addNodesEdges(edges, xmlEdges, "edge")
  # Edges Label (for data frame)
  if (length(edgesLabel) == 0) edgesLabel <- edges[,"id"]
  results <- build.and.validate.gexf(
    meta = meta,
    mode = list(defaultedgetype=defaultedgetype, mode=mode),
    atts.definitions = list(nodes = nodesAttDf, edges = edgesAttDf),
    nodesVizAtt = nodesVizAtt,
    edgesVizAtt = edgesVizAtt,
    nodes = as.data.frame(nodes),
    edges = as.data.frame(cbind(edges,edgesLabel)),
    graph = XML::saveXML(xmlFile, encoding=encoding)
  )
  # Re-add the viz: namespace prefix that .addNodesEdges emitted without it
  for (viz in c("color", "size", "shape", "position"))
    results$graph <- gsub(
      sprintf("<%s",viz),
      sprintf("<viz:%s", viz),
      results$graph,
      fixed=TRUE)
  # Returns: the gexf object, or write it to disk when -output- is given
  if (is.na(output)) {
    return(results)
  } else {
    write.gexf(results, output=output, replace=TRUE)
  }
}
#' @export
#' @rdname gexf-class
write.gexf <- function(nodes, ...) {
  # If -nodes- is not already a gexf object, build one first: all arguments
  # (including -output-) are forwarded to gexf().
  if (!inherits(nodes, "gexf")) {
    gexf(nodes, ...)
  } else if (length(list(...)$output)) {
    output <- list(...)$output
    f <- file(description = output, open="w",encoding='UTF-8')
    # Guarantee the connection is released even if write() fails.
    on.exit(close(f), add = TRUE)
    write(nodes$graph, file=f)
    message('GEXF graph successfully written at:\n',normalizePath(output))
    return(invisible(nodes))
  } else {
    # Previously this case fell through silently doing nothing; make the
    # no-op explicit and still hand the object back invisibly.
    warning("No -output- path supplied; nothing was written.", call. = FALSE)
    invisible(nodes)
  }
}
| /R/rgexf.r | permissive | jbdatascience/rgexf | R | false | false | 27,188 | r | #' Decompose an edge list
#'
#' Generates two data frames (nodes and edges) from a list of edges
#'
#' `edge.list` transforms the input into a two-elements list containing a
#' dataframe of nodes (with columns \dQuote{id} and \dQuote{label}) and a
#' dataframe of edges. The last one is numeric (with columns \dQuote{source}
#' and \dQuote{target}) and based on autogenerated nodes' ids.
#'
#' @param x A matrix or data frame structured as a list of edges
#' @return A list containing two data frames.
#' @author George Vega Yon
#'
#' Jorge Fabrega Lacoa
#' @keywords manip
#' @examples
#'
#' edgelist <- matrix(
#' c("matthew","john",
#' "max","stephen",
#' "matthew","stephen"),
#' byrow=TRUE, ncol=2)
#'
#' edge.list(edgelist)
#'
#' @export
edge.list <- function(x) {
  ##############################################################################
  # Translate an edge list (two-column matrix/data.frame of endpoint labels)
  # into a list with a -nodes- data.frame (id + label) and a numeric -edges-
  # matrix whose source/target columns hold the auto-generated node ids.
  ##############################################################################
  objClass <- class(x)
  if (!any(c("matrix", "data.frame") %in% objClass))
    stop("-", objClass,
         "- class not supported, try with a \"matrix\" or a \"data.frame\"")
  k <- ncol(x)
  if (k != 2)
    stop("Insufficient number of columns (", k,")")
  # Stack both endpoint columns into a single factor: every distinct vertex
  # gets one integer code (its id) and one level (its label).
  if (!is.factor(x))
    x <- factor(c(x[, 1], x[, 2]))
  # First half of the codes are the sources, second half the targets.
  edges <- matrix(unclass(x), byrow = FALSE, ncol = 2)
  colnames(edges) <- c("source", "target")
  # seq_len() (rather than 1:nlevels(x)) keeps the zero-row input case sane:
  # 1:0 would yield c(1, 0) and break the data.frame construction.
  nodes <- data.frame(id = seq_len(nlevels(x)), label = levels(x),
                      stringsAsFactors = FALSE)
  list(nodes = nodes, edges = edges)
}
.defAtt <- function(x, parent) {
################################################################################
# Prints the nodes and edges att definition
################################################################################
  # Emits one <attribute id=... title=... type=.../> child under -parent- for
  # every row of -x- (the id/title/type data.frame built by gexf()).
  # NOTE: apply() coerces the data.frame to a character matrix, so each row
  # reaches the callback as a named character vector, which is exactly the
  # form XML::newXMLNode expects for -attrs-.
  apply(x, MARGIN=1,
        function(x, PAR) {
          XML::newXMLNode(name="attribute", parent=PAR, attrs=x)
        }, PAR=parent)
}
.addAtts <- function(tmpatt, attvec, tmpdoc=NULL) {
################################################################################
# Builds the <attvalues> XML fragment to be parsed by parseXMLAndAdd
################################################################################
  # One <attvalue for="attN" value="..."/> per attribute of a single
  # node/edge, wrapped in a single "<attvalues>...</attvalues>" string.
  # Any lines already in -tmpdoc- are kept in front of the new ones.
  vals <- data.frame(
    "for" = paste0("att", attvec),
    value = unlist(tmpatt, recursive = FALSE),
    check.names = FALSE
  )
  attlines <- vapply(
    attvec,
    function(i) .writeXMLLine("attvalue", vals[i, , drop = FALSE]),
    character(1L)
  )
  paste(c("<attvalues>", tmpdoc, attlines, "</attvalues>"), collapse = "")
}
.writeXMLLine <- function(type, obj, finalizer=TRUE) {
################################################################################
# Builds as character whatever XML line is needed
################################################################################
  # Renders the single-row data.frame -obj- as one XML element named -type-.
  # NA cells are skipped entirely; -finalizer- selects a self-closing "/>"
  # versus an open ">" terminator.
  keep <- !is.na(obj)
  attrs <- paste(colnames(obj)[keep], obj[keep], sep = "=\"", collapse = "\" ")
  closing <- if (finalizer) "\"/>" else "\">"
  paste0("<", type, " ", attrs, closing)
}
.addNodesEdges <- function(dataset, PAR, type="node", doc) {
################################################################################
# Prints the nodes and edges
################################################################################
  # Appends one <node>/<edge> XML element per row of -dataset- under -PAR-.
  # Columns named "attN" become <attvalue> children; columns prefixed "viz."
  # become viz child elements (the "viz:" namespace prefix is re-added later
  # by gexf() via gsub). All remaining columns become element attributes.
  # NOTE(review): the -doc- parameter is never used in this function body.
  n <- NROW(dataset)
  vec <- 1:n
  xvars <- colnames(dataset)
  # Columns that are neither attN nor viz.* -> plain XML attributes
  noattnames <- xvars[grep("(^att[0-9])|(^viz[.])", xvars, invert=T)]
  datasetnoatt <- dataset[, noattnames, drop=FALSE]
  # Parsing user-defined attributes (note: -attributes- flag is assigned
  # inside the condition)
  if (attributes <- length(grep("^att", xvars)) > 0) {
    attnames <- colnames(dataset)[grep("^att", xvars)]
    att <- dataset[,attnames, drop=FALSE]
    attvec <- 1:length(attnames)
  }
  # Parsing VIZ attributes: split dataset columns into one data.frame per
  # viz family, stripping the "viz.<family>." column-name prefix.
  if ((vizattributes <- length(grep("^viz[.]", xvars)) > 0)) {
    vizattnames <- colnames(dataset)[grep("^viz[.]", xvars)]
    # Color atts (r/g/b/a columns)
    if ((vizcolors <- any(grepl("^viz[.]color",vizattnames)))) {
      vizcol.df <- dataset[,grep("^viz[.]color[.]", vizattnames, value=TRUE)]
      colnames(vizcol.df) <- gsub("^viz[.]color[.]", "", colnames(vizcol.df))
    }
    # Pos att (x/y/z columns)
    if ((vizposition <- any(grepl("^viz[.]position",vizattnames)))) {
      vizpos.df <- dataset[,grep("^viz[.]position[.]", vizattnames, value=TRUE), drop=FALSE]
      colnames(vizpos.df) <- gsub("^viz[.]position[.]", "", colnames(vizpos.df))
    }
    # Size att
    if ((vizsize <- any(grepl("^viz[.]size",vizattnames)))) {
      vizsiz.df <- dataset[,grep("^viz[.]size[.]", vizattnames, value=TRUE), drop=FALSE]
      colnames(vizsiz.df) <- gsub("^viz[.]size[.]", "", colnames(vizsiz.df))
    }
    # Shape att
    if ((vizshape <- any(grepl("^viz[.]shape",vizattnames)))) {
      vizshp.df <- dataset[,grep("^viz[.]shape[.]", vizattnames, value=TRUE), drop=FALSE]
      colnames(vizshp.df) <- gsub("^viz[.]shape[.]", "", colnames(vizshp.df))
    }
    # Image att: renamed to value/uri because GEXF encodes images as a
    # shape element with a uri (see the "shape" element written below)
    if ((vizimage <- any(grepl("^viz[.]image",vizattnames)))) {
      vizimg.df <- dataset[,grep("^viz[.]image[.]", vizattnames, value=TRUE), drop=FALSE]
      colnames(vizimg.df) <- c("value", "uri")
    }
    # Thickness att
    # NOTE(review): this duplicates the viz.size block above; -vizthk.df- is
    # never referenced again in this function — confirm whether edge
    # thickness was meant to be written out.
    if ((viztness <- any(grepl("^viz[.]size",vizattnames)))) {
      vizthk.df <- dataset[,grep("^viz[.]size[.]", vizattnames, value=TRUE), drop=FALSE]
      colnames(vizthk.df) <- gsub("^viz[.]size[.]", "", colnames(vizthk.df))
    }
  }
  # Free memory
  rm(dataset)
  # Fast path: no attributes at all — each row is a single self-closing tag
  if (!attributes && !vizattributes) {
    for (i in vec) {
      XML::parseXMLAndAdd(.writeXMLLine(type, datasetnoatt[i,,drop=FALSE]),parent=PAR)
    }
    return(NULL)
  }
  # Path: data attributes only (no viz children)
  if (attributes && !vizattributes) {
    for (i in vec) {
      # Adding directly
      XML::parseXMLAndAdd(
        paste(.writeXMLLine(type, datasetnoatt[i,,drop=FALSE], finalizer=FALSE),
              .addAtts(att[i,], attvec), # Builds atts definition
              "</",type,">",sep=""),
        parent=PAR)
    }
    return(NULL)
  }
  # General path: attributes and/or viz attributes — build the element string
  # piecewise, then parse and attach it.
  for (i in vec) {
    # Node/Edge + Atts
    if (attributes) {
      tempnode0 <- paste(
        .writeXMLLine(type, datasetnoatt[i,,drop=FALSE], finalizer=FALSE),
        .addAtts(att[i,], attvec), sep="")
    }
    else tempnode0 <- .writeXMLLine(type, datasetnoatt[i,,drop=FALSE], finalizer=FALSE)
    # Viz Att printing
    # Colors
    if (vizcolors) {
      tempnode0 <- paste(tempnode0, .writeXMLLine("color", vizcol.df[i,,drop=FALSE]),
                         sep="")
    }
    # Position
    if (vizposition) {
      tempnode0 <- paste(tempnode0, .writeXMLLine("position", vizpos.df[i,,drop=FALSE]),
                         sep="")
    }
    # Size (only the first size column is written)
    if (vizsize) {
      tempnode0 <- paste(tempnode0, .writeXMLLine("size", vizsiz.df[i,1, drop=FALSE]),
                         sep="")
    }
    # Shape
    if (vizshape) {
      tempnode0 <- paste(tempnode0, .writeXMLLine("shape", vizshp.df[i,1,drop=FALSE]),
                         sep="")
    }
    # Image — written as a "shape" element carrying value/uri (GEXF style)
    if (vizimage) {
      tempnode0 <- paste(tempnode0, .writeXMLLine("shape", vizimg.df[i,,drop=FALSE]),
                         sep="")
    }
    XML::parseXMLAndAdd(sprintf("%s</%s>",tempnode0, type), parent=PAR)
  }
  return(NULL)
}
#' Creates an object of class `gexf`
#'
#' Takes a `node` matrix (or dataframe) and an
#' `edge` matrix (or dataframe) and creates a `gexf` object
#' containing a data-frame representation and a gexf representation of a graph.
#'
#' @details
#' Just like `nodesVizAtt` and `edgesVizAtt`, `nodesAtt` and
#' `edgesAtt` must have the same number of rows as nodes and edges,
#' respectively. Using data frames is necessary as in this way data types are
#' preserved.
#'
#' `nodesVizAtt` and `edgesVizAtt` allow using visual attributes such
#' as color, position (nodes only), size (nodes only), thickness (edges only)
#' shape and image (nodes only). \itemize{ \item Color is defined by the RGBA
#' color model, thus for every node/edge the color should be specified through
#' a data-frame with columns *r* (red), *g* (green), *b* (blue)
#' with integers between 0 and 256 and a last column with *alpha* values
#' as a float between 0.0 and 1.0. \item Position, for every node, it is a
#' three-column data-frame including *x*, *y* and *z*
#' coordinates. The three components must be float. \item Size as a numeric
#' colvector (float values). \item Thickness (see size). \item Node Shape
#' (string), currently unsupported by Gephi, can take the values of
#' *disk*, *square*, *triangle*, *diamond* and
#' *image*. \item Edge Shape (string), currently unsupported by Gephi, can
#' take the values of *solid*, *dotted*, *dashed* and
#' *double*. \item Image (string), currently unsupported by Gephi,
#' consists on a vector of strings representing URIs. }
#'
#' `nodeDynamic` and `edgeDynamic` allow to draw dynamic graphs. It
#' should contain two columns *start* and *end*, both allowing
#' `NA` value. It can be use jointly with `tFormat` which by default
#' is set as \dQuote{double}. Currently accepted time formats are: \itemize{
#' \item Integer or double. \item International standard *date*
#' yyyy-mm-dd. \item dateTime W3 XSD
#' (http://www.w3.org/TR/xmlschema-2/#dateTime). }
#'
#' @param nodes A two-column data-frame or matrix of \dQuote{id}s and
#' \dQuote{label}s representing nodes.
#' @param edges A two-column data-frame or matrix containing \dQuote{source}
#' and \dQuote{target} for each edge. Source and target values are based on the
#' nodes ids.
#' @param edgesId A one-column data-frame, matrix or vector.
#' @param edgesLabel A one-column data-frame, matrix or vector.
#' @param edgesAtt A data-frame with one or more columns representing edges'
#' attributes.
#' @param edgesWeight A numeric vector containing edges' weights.
#' @param edgesVizAtt List of three or less viz attributes such as color, size
#' (thickness) and shape of the edges (see details)
#' @param nodesAtt A data-frame with one or more columns representing nodes'
#' attributes
#' @param nodesVizAtt List of four or less viz attributes such as color,
#' position, size and shape of the nodes (see details)
#' @param nodeDynamic A two-column matrix or data-frame. The first column
#' indicates the time at which a given node starts; the second one shows when
#' it ends. The matrix or data-frame must have the same number of rows than the
#' number of nodes in the graph.
#' @param edgeDynamic A two-column matrix or data-frame. The first column
#' indicates the time at which a given edge starts; the second one shows when it
#' ends. The matrix or dataframe must have the same number of rows than the
#' number of edges in the graph.
#' @param digits Integer. Number of decimals to keep for nodes/edges sizes. See
#' [print.default()]
#' @param output String. The complete path (including filename) where to export
#' the graph as a GEXF file.
#' @param tFormat String. Time format for dynamic graphs (see details)
#' @param defaultedgetype \dQuote{directed}, \dQuote{undirected},
#' \dQuote{mutual}
#' @param meta A List. Meta data describing the graph
#' @param keepFactors Logical, whether to handle factors as numeric values
#' (`TRUE`) or as strings (`FALSE`) by using `as.character`.
#' @param encoding Encoding of the graph.
#' @param vers Character scalar. Version of the GEXF format to generate.
#' By default `"1.3"`.
#' @param ... Passed to `gexf`.
#' @param rescale.node.size Logical scalar. When `TRUE` it rescales the
#' size of the vertices such that the largest one is about 5\% of the plotting
#' region.
#' @return A `gexf` class object (list). Contains the following: \itemize{
#' \item `meta` : (list) Meta data describing the graph. \item
#' `mode` : (list) Sets the default edge type and the graph mode. \item
#' `atts.definitions`: (list) Two data-frames describing nodes and edges
#' attributes. \item `nodesVizAtt` : (data-frame) A multi-column
#' data-frame with the nodes' visual attributes. \item `edgesVizAtt` :
#' (data-frame) A multi-column data-frame with the edges' visual attributes.
#' \item `nodes` : (data-frame) A two-column data-frame with nodes' ids
#' and labels. \item `edges` : (data-frame) A five-column data-frame with
#' edges' ids, labels, sources, targets and weights. \item `graph` :
#' (String) GEXF (XML) representation of the graph. }
#' @author George Vega Yon
#'
#' Jorge Fabrega Lacoa
#' @seealso [new.gexf.graph()]
#' @references The GEXF project website: http://gexf.net
#' @keywords IO
#' @examples
#'
#' \dontrun{
#' demo(gexf) # Example of gexf command using fictional data.
#' demo(gexfattributes) # Working with attributes.
#' demo(gexfbasic) # Basic net.
#' demo(gexfdynamic) # Dynamic net.
#' demo(edge.list) # Working with edges lists.
#' demo(gexffull) # All the package.
#' demo(gexftwitter) # Example with real data of chilean twitter accounts.
#' demo(gexfdynamicandatt) # Dynamic net with static attributes.
#' demo(gexfbuildfromscratch) # Example building a net from scratch.
#' }
#'
#' @name gexf-class
NULL
default_nodeVizAtt <- list(
  # Fallback generators used by set_default_nodeVizAtt() when the caller
  # supplies no node viz attributes; each is called once per node.
  size     = function() 8.0,
  # Random x/y coordinates in [-200, 200]; z is fixed at 0.
  position = function() {
    xy <- runif(2, -200, 200)
    structure(c(xy, 0), names = c("x", "y", "z"))
  },
  color    = function() "tomato"
)
set_default_nodeVizAtt <- function() {
  # Fill in missing node viz attributes (size/position/color) in the CALLER's
  # -nodesVizAtt- list, one default row per node. The result is assigned
  # directly into the calling frame: gexf() relies on this side effect.
  env <- parent.frame()
  n <- nrow(env[["nodes"]])
  # For each attribute the caller left empty, build an n-row default matrix.
  for (att in names(default_nodeVizAtt))
    if (!length(env[["nodesVizAtt"]][[att]])) {
      # seq_len(n) instead of 1L:n so a zero-row -nodes- yields zero default
      # rows (1L:0 would iterate over c(1, 0)).
      env[["nodesVizAtt"]][[att]] <- do.call(
        rbind,
        lapply(seq_len(n), function(i) default_nodeVizAtt[[att]]())
      )
    }
}
#' @export
#' @rdname gexf-class
gexf <- function(
################################################################################
# Prints the gexf file
################################################################################
nodes,
edges,
edgesLabel=NULL,
edgesId=NULL,
edgesAtt=NULL,
edgesWeight=NULL,
edgesVizAtt = list(color=NULL, size=NULL, shape=NULL),
nodesAtt=NULL,
nodesVizAtt = list(color=NULL, position=NULL, size=NULL, shape=NULL, image=NULL),
nodeDynamic=NULL,
edgeDynamic=NULL,
digits=getOption("digits"),
output = NA,
tFormat="double",
defaultedgetype = "undirected",
meta = list(
creator = "NodosChile",
description = "A GEXF file written in R with \"rgexf\"",
keywords = "GEXF, NodosChile, R, rgexf, Gephi"
),
keepFactors = FALSE,
encoding = "UTF-8",
vers = "1.3",
rescale.node.size = TRUE
) {
##############################################################################
# CLASS CHECKS AND OTHERS CHECKS
# Nodes
if (inherits(nodes, c("data.frame", "matrix"))) {
if (ncol(nodes) != 2)
stop("-nodes- should have two columns not ", ncol(nodes))
}
else
stop("Invalid object type: -nodes- should be a two column data.frame or a matrix")
# Edges
if (inherits(edges, c("data.frame", "matrix"))) {
if (ncol(edges) != 2)
stop("-edges- should have two columns not ", ncol(edges))
}
else
stop("Invalid object type: -edges- should be a two column data.frame or a matrix")
# version
vers <- gexf_version(vers)
n <- nrow(nodes)
m <- nrow(edges)
# Edges Label
.parseEdgesLabel(edgesLabel, edges)
# Parsing Edges Id
edgesId <- .parseEdgesId(edgesId, edges)
# Parsing Edges Att
nEdgesAtt <- .parseEdgesAtt(edgesAtt, edges)
# Parsing edges Weight
.parseEdgesWeight(edgesWeight, edges)
# Parsing edges Viz Att
edgesVizAtt <- if (length(unlist(edgesVizAtt))) {
nodesVizAtt <- nodesVizAtt[sapply(edgesVizAtt, length) > 0]
Map(function(a, b) parseVizAtt(a, b, m, "edges"), a=names(edgesVizAtt),
b=edgesVizAtt)
} else NULL
nEdgesVizAtt <- length(edgesVizAtt)
# nEdgesVizAtt <- .parseEdgesVizAtt(edgesVizAtt, edges)
# Nodes Att
nNodesAtt <- .parseNodesAtt(nodesAtt, nodes)
# Parsing nodes Viz Atts
set_default_nodeVizAtt()
# Rescaling vertex sizes if required
if (rescale.node.size) {
# Getting ranges
sscale <- apply(nodesVizAtt$position, 2, range)
sscale <- max(sscale[2 ,] - sscale[1 ,])*.01
nodesVizAtt$size <- nodesVizAtt$size/max(nodesVizAtt$size)*sscale
}
nodesVizAtt <- if (length(unlist(nodesVizAtt))) {
# # Removing empty ones
nodesVizAtt <- nodesVizAtt[sapply(nodesVizAtt, length) > 0]
Map(function(a, b) parseVizAtt(a, b, n, "nodes"), a=names(nodesVizAtt),
b=nodesVizAtt)
} else NULL
nNodesVizAtt <- length(nodesVizAtt)
# Checking the number of digits
if (!is.integer(digits)) stop("Invalid number of digits ",digits,
".\n Must be a number between 0 and 22")
fmt <- sprintf("%%.%gg", digits)
# Dynamics
dynamic <- c(FALSE, FALSE)
if (length(nodeDynamic) > 0) {
if (is.data.frame(nodeDynamic) | is.matrix(nodeDynamic)) {
if (NROW(nodeDynamic) == NROW(nodes)) dynamic[1] <- TRUE
else stop("Insufficient number of rows: -nodeDynamic- (",NROW(nodeDynamic),
" rows) should have the same number of rows than nodes there are (",
NROW(nodes),")")
} else stop("Invalid object type: -nodeDynamic- should be a two columns data.frame or a matrix")
}
if (length(edgeDynamic) > 0) {
if (is.data.frame(edgeDynamic) | is.matrix(edgeDynamic)) {
if (NROW(edgeDynamic) == NROW(edges)) dynamic[2] <- TRUE
else stop("Insufficient number of rows: -edgeDynamic- (",NROW(edgeDynamic), " rows) should have the same number of rows than edges there are (", NROW(edges),")")
} else stop("Invalid object type: -edgeDynamic- should be a two columns data.frame or a matrix")
}
##############################################################################
# Strings
old.strAF <- getOption("stringsAsFactors")
on.exit(options(stringsAsFactors = old.strAF))
options(stringsAsFactors = FALSE)
if (!any(dynamic)) mode <- "static" else mode <- "dynamic"
# Starting xml
xmlFile <- XML::newXMLDoc(addFinalizer=TRUE)
gexf <- XML::newXMLNode(name="gexf", doc = xmlFile)
# gexf att
XML::newXMLNamespace(node=gexf, namespace=vers$xmlns)
XML::newXMLNamespace(
node=gexf, namespace=vers$`xmlns:vis`, prefix="viz")
XML::newXMLNamespace(
node=gexf, namespace="http://www.w3.org/2001/XMLSchema-instance",
prefix="xsi"
)
XML::xmlAttrs(gexf) <- c(
"xsi:schemaLocation" = vers$`xsi:schemaLocation`, version=vers$number)
# graph
xmlMeta <- XML::newXMLNode(name="meta",
attrs=list(lastmodifieddate=as.character(Sys.Date())),
parent=gexf)
XML::newXMLNode(name="creator", meta$creator, parent=xmlMeta)
XML::newXMLNode(name="description", meta$description, parent=xmlMeta)
XML::newXMLNode(name="keywords", meta$keywords, parent=xmlMeta)
xmlGraph <- XML::newXMLNode(name="graph", parent=gexf)
if (mode == "dynamic") {
# Fixing time factors
if (keepFactors) {
for(i in 1:2) {
if (dynamic[1])
nodeDynamic[,i] <- as.numeric(nodeDynamic[,i])
if (dynamic[2])
edgeDynamic[,i] <- as.numeric(edgeDynamic[,i])
}
} else {
for(i in 1:2) {
if (dynamic[1])
nodeDynamic[,i] <- as.character(nodeDynamic[,i])
if (dynamic[2])
edgeDynamic[,i] <- as.character(edgeDynamic[,i])
}
}
strTime <- c(unlist(nodeDynamic),unlist(edgeDynamic))
endTime <- strTime
# Checking start and ends
strTime <- min(strTime, na.rm=TRUE)
endTime <- max(endTime, na.rm=TRUE)
XML::xmlAttrs(xmlGraph) <- c(mode=mode, start=strTime, end=endTime,
timeformat=tFormat, defaultedgetype=defaultedgetype)
# Replacing NAs
if (dynamic[1]) {
nodeDynamic[is.na(nodeDynamic[,1]),1] <- strTime
nodeDynamic[is.na(nodeDynamic[,2]),2] <- endTime
}
if (dynamic[2]) {
edgeDynamic[is.na(edgeDynamic[,1]),1] <- strTime
edgeDynamic[is.na(edgeDynamic[,2]),2] <- endTime
}
} else
XML::xmlAttrs(xmlGraph) <- c(mode=mode, defaultedgetype=defaultedgetype)
datatypes <- matrix(
c(
"string", "character",
"integer", "integer",
"float", "double",
"boolean", "logical"
), byrow=TRUE, ncol =2)
# nodes att definitions
if (nNodesAtt > 0) {
TIT <- colnames(nodesAtt)
TYPE <- unlist(lapply(nodesAtt, typeof))
CLASS <- unlist(lapply(nodesAtt, class))
# Checks for factors (factor replacing is done later)
if (keepFactors) TYPE[CLASS == "factor"] <- "integer"
else TYPE[CLASS == "factor"] <- "string"
nodesAttDf <- data.frame(
id = paste("att",1:nNodesAtt,sep=""),
title = TIT,
type = TYPE
)
# Fixing datatype
for (i in 1:NROW(datatypes)) {
nodesAttDf$type <- gsub(datatypes[i,2], datatypes[i,1], nodesAttDf$type)
}
xmlAttNodes <- XML::newXMLNode(name="attributes", parent=xmlGraph)
XML::xmlAttrs(xmlAttNodes) <- c(class="node", mode="static")
.defAtt(nodesAttDf, parent=xmlAttNodes)
}
else {
nodesAttDf <- NULL
}
# edges att
if (nEdgesAtt > 0) {
TIT <- colnames(edgesAtt)
TYPE <- unlist(lapply(edgesAtt, typeof))
CLASS <- unlist(lapply(edgesAtt, class))
# Checks for factors (factor replacing is done later)
if (keepFactors) TYPE[CLASS == "factor"] <- "integer"
else TYPE[CLASS == "factor"] <- "string"
edgesAttDf <- data.frame(
id = paste("att",1:nEdgesAtt,sep=""),
title = TIT,
type = TYPE
)
# Fixing datatype
for (i in 1:NROW(datatypes)) {
edgesAttDf$type <- gsub(datatypes[i,2], datatypes[i,1], edgesAttDf$type)
}
xmlAttEdges <- XML::newXMLNode(name="attributes", parent=xmlGraph)
XML::xmlAttrs(xmlAttEdges) <- c(class="edge", mode="static")
.defAtt(edgesAttDf, parent=xmlAttEdges)
}
else {
edgesAttDf <- NULL
}
# nodes vizatt ---------------------------------------------------------------
ListNodesVizAtt <- if (nNodesVizAtt > 0)
do.call(cbind, unname(nodesVizAtt))
else
NULL
nodesVizAtt <- lapply(nodesVizAtt, function(x) {
colnames(x) <- gsub("^viz[.][a-zA-Z]+[.]", "", colnames(x))
x
})
# edges vizatt ---------------------------------------------------------------
ListEdgesVizAtt <- if (nEdgesVizAtt >0)
do.call(cbind, unname(edgesVizAtt))
else
NULL
edgesVizAtt <- lapply(edgesVizAtt, function(x) {
colnames(x) <- gsub("^viz[.][a-zA-Z]+[.]", "", colnames(x))
x
})
##############################################################################
# The basic char matrix definition for nodes
if (dynamic[1])
nodeDynamic <- as.data.frame(nodeDynamic)
if (nNodesAtt > 0)
nodesAtt <- data.frame(nodesAtt)
for (set in c(nodeDynamic, nodesAtt, ListNodesVizAtt)) {
try(nodes <- data.frame(nodes, set), silent=TRUE)
}
# Naming the columns
attNames <- nodesAttDf["id"]
if (!is.null(nodeDynamic))
tmeNames <- c("start", "end")
else
tmeNames <- NULL
colnames(nodes) <- unlist(
c("id", "label", tmeNames, attNames, colnames(ListNodesVizAtt))
)
# Fixing factors
if (keepFactors) {
tofix <- which(lapply(nodes, class) %in% "factor")
if (length(tofix)) {
warning("Factor variables will be stored as -numeric-.",
"\nIf you don't want this behavior, set -keepFactors- as -FALSE-.")
nodes[,tofix] <- lapply(nodes[,tofix,drop=FALSE], as.numeric)
}
} else {
tofix <- which(lapply(nodes, class) %in% "factor")
if (length(tofix))
nodes[,tofix] <- lapply(nodes[,tofix,drop=FALSE], as.character)
}
# NODES
xmlNodes <- XML::newXMLNode(name="nodes", parent=xmlGraph)
.addNodesEdges(nodes, xmlNodes, "node")
##############################################################################
# The basic dataframe definition for edges
if (dynamic[2]) edgeDynamic <- as.data.frame(edgeDynamic)
if (nEdgesAtt > 0) edgesAtt <- data.frame(edgesAtt)
# Adding edge id
try(edgesId <- cbind(edgesId, edgesLabel), silent=TRUE)
edges <- cbind(edgesId, edges)
for (set in c(edgeDynamic, edgesAtt, ListEdgesVizAtt)) {
try(edges <- data.frame(edges, set), silent=TRUE)
}
# Naming the columns
attNames <- edgesAttDf["id"]
if (!is.null(edgeDynamic))
tmeNames <- c("start", "end")
else
tmeNames <- NULL
# Generating weights
if (!length(edgesWeight)) edgesWeight <- 1
edges <- data.frame(edges, x=as.numeric(edgesWeight))
edges$x <- sprintf(fmt, edges$x)
# Seting colnames
if (length(edgesLabel) > 0) edgesLabelCName <- "label"
else edgesLabelCName <- NULL
colnames(edges) <- unlist(c("id", edgesLabelCName, "source", "target",
tmeNames, attNames, colnames(ListEdgesVizAtt),
"weight"))
# EDGES
xmlEdges <- XML::newXMLNode(name="edges", parent=xmlGraph)
# Fixing factors
if (keepFactors) {
for (i in colnames(edges)) {
if (class(edges[[i]]) == "factor")
edges[[i]] <- as.numeric(edges[[i]])
}
} else {
for (i in colnames(edges)) {
if (class(edges[[i]]) == "factor")
edges[[i]] <- as.character(edges[[i]])
}
}
# Adding edges
.addNodesEdges(edges, xmlEdges, "edge")
# Edges Label (for data frame)
if (length(edgesLabel) == 0) edgesLabel <- edges[,"id"]
results <- build.and.validate.gexf(
meta = meta,
mode = list(defaultedgetype=defaultedgetype, mode=mode),
atts.definitions = list(nodes = nodesAttDf, edges = edgesAttDf),
nodesVizAtt = nodesVizAtt,
edgesVizAtt = edgesVizAtt,
nodes = as.data.frame(nodes),
edges = as.data.frame(cbind(edges,edgesLabel)),
graph = XML::saveXML(xmlFile, encoding=encoding)
)
# Fixing
for (viz in c("color", "size", "shape", "position"))
results$graph <- gsub(
sprintf("<%s",viz),
sprintf("<viz:%s", viz),
results$graph,
fixed=TRUE)
# Returns
if (is.na(output)) {
return(results)
} else {
# warning("Starting version 0.17.0")
write.gexf(results, output=output, replace=TRUE)
}
}
#' @export
#' @rdname gexf-class
write.gexf <- function(nodes, ...) {
  # Not yet a "gexf" object: delegate construction to gexf(), which also
  # handles writing when an -output- argument is supplied through ... .
  if (!inherits(nodes, "gexf")) {
    return(gexf(nodes, ...))
  }
  # Already a "gexf" object: write its XML string to disk when -output-
  # is given; otherwise fall through and return NULL invisibly.
  dots <- list(...)
  if (length(dots$output)) {
    path <- dots$output
    con <- file(description = path, open = "w", encoding = "UTF-8")
    write(nodes$graph, file = con)
    close.connection(con)
    message('GEXF graph successfully written at:\n', normalizePath(path))
    return(invisible(nodes))
  }
}
|
# Residual diagnostics and multivariate model selection for fitted
# time-series models. This script expects the following objects to already
# exist in the workspace when it is sourced:
#   final.resid - residual series of the final univariate model
#   arima_final - the fitted ARIMA model object
#   train.ts    - multivariate training series used for the VAR fit
# NOTE(review): jarque.bera.test is from the -tseries- package and
# VARselect/VAR/arch.test/normality.test/serial.test are from -vars-;
# both packages must be attached beforehand -- confirm against the
# full script this chunk belongs to.
# Univariate
# Auto-correlation of the residuals (NAs passed through), out to 60 lags
acf(final.resid,na.action = na.pass,lag=60)
# Ljung-Box test for remaining autocorrelation; fitdf=5 subtracts the
# number of fitted model parameters from the degrees of freedom
Box.test(final.resid,lag=6,type="Ljung",fitdf=5)
#Normality checks: histogram and normal Q-Q plot side by side
par(mfrow=c(1,2))
hist(final.resid,breaks="FD",xlab='Standardized Residuals',main='Residual')
qqnorm(final.resid)
qqline(final.resid)
# Jarque-Bera normality test; the test cannot handle missing values
jarque.bera.test(na.omit(final.resid))
# Remove NA values na.omit()
residuals <- resid(arima_final)
#par(mfrow=c(3,1))
# Residual series plot plus ACF/PACF of the final ARIMA fit
ts.plot(residuals)
acf(residuals)
pacf(residuals)
Box.test(resid(arima_final), lag = 7, type = "Box-Pierce", fitdf = 6)
Box.test(resid(arima_final), lag = 7, type = "Ljung-Box", fitdf = 6)
### Box test, lag used is sum of orders (7 = 3+1+3)
### Degrees of freedom, fitdf is lag - 1, so 6 here
#Multivariate
# Lag-order selection criteria (AIC/HQ/SC/FPE) for the vector autoregression
vs <- VARselect(train.ts)
vs$selection
#VAR(2) chosen from the selection output above
mod <- VAR(train.ts,p=2)
## ARCH, Residual Analysis: Constant Variance Assumption
arch.test(mod)
## J-B, Residual Analysis: Normality Assumption
normality.test(mod)
## Portmantau, Residual Analysis: Uncorrelated Errors Assumption
serial.test(mod) | /statistical_tests.R | no_license | adib0073/R-time-series-utility | R | false | false | 996 | r | # Univariate
# Auto-correlation
acf(final.resid,na.action = na.pass,lag=60)
Box.test(final.resid,lag=6,type="Ljung",fitdf=5)
#Normality
par(mfrow=c(1,2))
hist(final.resid,breaks="FD",xlab='Standardized Residuals',main='Residual')
qqnorm(final.resid)
qqline(final.resid)
jarque.bera.test(na.omit(final.resid))
# Remove NA values na.omit()
residuals <- resid(arima_final)
#par(mfrow=c(3,1))
ts.plot(residuals)
acf(residuals)
pacf(residuals)
Box.test(resid(arima_final), lag = 7, type = "Box-Pierce", fitdf = 6)
Box.test(resid(arima_final), lag = 7, type = "Ljung-Box", fitdf = 6)
### Box test, lag used is sum of orders (7 = 3+1+3)
### Degrees of freedom, fitdf is lag - 1, so 6 here
#Multivariate
vs <- VARselect(train.ts)
vs$selection
#VAR(2)
mod <- VAR(train.ts,p=2)
## ARCH, Residual Analysis: Constant Variance Assumption
arch.test(mod)
## J-B, Residual Analysis: Normality Assumption
normality.test(mod)
## Portmantau, Residual Analysis: Uncorrelated Errors Assumption
serial.test(mod) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/A1-R1C1-regex-utils.R
\name{guess_fo}
\alias{guess_fo}
\title{Guess cell reference string format}
\usage{
guess_fo(x, fo = c("R1C1", "A1"))
}
\arguments{
\item{x}{character vector of cell reference strings}
\item{fo}{default to assume if format is ambiguous}
}
\value{
character vector consisting of \code{R1C1}, \code{A1}, or \code{NA}
}
\description{
Guess if cell references are in R1C1 or A1 format.
}
\examples{
A1 <- c("A1", "$A1", "A$1", "$A$1", "a1")
guess_fo(A1)
R1C1 <- c("R1C1", "R1C[-1]", "R[-1]C1", "R[-1]C[9]")
guess_fo(R1C1)
guess_fo("RC2")
guess_fo("12")
guess_fo(12)
}
| /man/guess_fo.Rd | no_license | StatisMike/cellranger | R | false | true | 666 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/A1-R1C1-regex-utils.R
\name{guess_fo}
\alias{guess_fo}
\title{Guess cell reference string format}
\usage{
guess_fo(x, fo = c("R1C1", "A1"))
}
\arguments{
\item{x}{character vector of cell reference strings}
\item{fo}{default to assume if format is ambiguous}
}
\value{
character vector consisting of \code{R1C1}, \code{A1}, or \code{NA}
}
\description{
Guess if cell references are in R1C1 or A1 format.
}
\examples{
A1 <- c("A1", "$A1", "A$1", "$A$1", "a1")
guess_fo(A1)
R1C1 <- c("R1C1", "R1C[-1]", "R[-1]C1", "R[-1]C[9]")
guess_fo(R1C1)
guess_fo("RC2")
guess_fo("12")
guess_fo(12)
}
|
#this function uses TMLE to estimate the transported stochastic direct and indirect effect
#for observed data O=(S,W,A,Z,M,Y), where
#S=1 is the source population and S=0 is the target population
#W are covariates
#A is an instrumental variable
#Z is an intermediate variable that is affected by instrument A
#M is a mediator variable and a function of W, Z under Model 1
# and a function of W, Z, A under Model 2
#Y is an outcome variable and a function of W, Z, M under Model 1
# and a function of W, Z, M, A under Model 2
# Model 1 includes the exclusion restriction assumptions (there is no direct effect of A on M or of A on Y)
# Model 2 allows the exclusion restriction not to hold
# Compute the stochastic mediator distributions g*(M = 1 | .) under the two
# instrument settings a* = 1 and a* = 0, by combining a mediator model
# P(M=1|Z,...) with an intermediate-variable model P(Z=1|A,...).
#
# data    : data.frame containing S, A, Z, M and the covariates in Wnames
# forms   : list of formulas; pooled uses Mstarform/Zstarform (fit on all
#           rows, with S in the model), stratified uses MformStratS/
#           ZformStratS (fit only on rows with S == gstar_S)
# Wnames  : covariate column names (carried for interface compatibility)
# pooled  : TRUE to fit pooled models across S, FALSE to stratify on S
# gstar_S : which population (value of S) the g* distributions refer to
# Returns a list with elements gstarM_astar1 and gstarM_astar0, one
# probability per row of -data-.
get_gstarM <- function(data, forms, Wnames, pooled, gstar_S) {
  # Counterfactual copies of the data with Z and A pinned to 0/1.
  d_z1 <- data
  d_z1$Z <- 1
  d_z0 <- data
  d_z0$Z <- 0
  d_a1 <- data
  d_a1$A <- 1
  d_a0 <- data
  d_a0$A <- 0

  if (pooled) {
    m_fit <- glm(formula = forms$Mstarform, data = data, family = "binomial")
    z_fit <- glm(formula = forms$Zstarform, data = data, family = "binomial")
    # Pooled fits include S, so predictions are made as if every row came
    # from population S = gstar_S.
    d_z1$S <- gstar_S
    d_z0$S <- gstar_S
    d_a1$S <- gstar_S
    d_a0$S <- gstar_S
  } else {
    # Stratified fits use only the gstar_S population; newdata keeps each
    # row's own S column (harmless since S is not in these formulas).
    sub <- data[data$S == gstar_S, ]
    m_fit <- glm(formula = forms$MformStratS, data = sub, family = "binomial")
    z_fit <- glm(formula = forms$ZformStratS, data = sub, family = "binomial")
  }

  p_m_z1 <- predict(m_fit, newdata = d_z1, type = "response")
  p_m_z0 <- predict(m_fit, newdata = d_z0, type = "response")
  p_z_a1 <- predict(z_fit, newdata = d_a1, type = "response")
  p_z_a0 <- predict(z_fit, newdata = d_a0, type = "response")

  # Marginalize the mediator model over the Z distribution under each a*.
  list(
    gstarM_astar1 = p_m_z1 * p_z_a1 + p_m_z0 * (1 - p_z_a1),
    gstarM_astar0 = p_m_z1 * p_z_a0 + p_m_z0 * (1 - p_z_a0)
  )
}
# Targeted maximum likelihood estimation (TMLE) of the transported
# stochastic direct and indirect effects described in the file header:
# nuisance models are fit using the source population (S=1) and the
# estimands are evaluated in the target population (S=0).
#
# data  : data.frame with columns S, A, Z, M, Y, the covariates named in
#         Wnames, and a -weights- column used throughout (presumably
#         survey/sampling weights -- TODO confirm against calling code).
# forms : list of formulas -- Yform, Aform, Sform, MformStratS, Zstarform --
#         plus QZform, the right-hand side string used for the Q_Z fits.
# iv    : TRUE applies the instrumental-variable (exclusion restriction)
#         clever covariates (A dropped from the conditioning); FALSE keeps A.
# gma1, gma0 : stochastic mediator distributions P(M=1|.) under a*=1 and
#         a*=0 (e.g. the output of get_gstarM), one value per row of data.
# Returns a list: point estimates nde/nie and EIC-based variances
# ndevar/nievar.
transportmedtmle<-function(data, forms, Wnames, iv, gma1, gma0){
#get inital fit Q_Y: outcome regression, fit in the source population only
yfit<-glm(formula=forms$Yform, family="binomial", data=data[data$S==1,])
# Columns of qyinit: Q_Y at the observed M, at M=0, and at M=1 (all rows)
qyinit<-cbind(predict(yfit, newdata=data, type="response"),
predict(yfit, newdata=data.frame(cbind(data[,c(Wnames, "A", "Z")], M=0)), type="response"),
predict(yfit, newdata=data.frame(cbind(data[,c(Wnames, "A", "Z")], M=1)), type="response"))
#estimate weights for targeting
# Propensity for A: evaluated at S=1 (psa1s1) and at the observed rows (psa1)
afit<-glm(formula=forms$Aform, family="binomial", data=data)
psa1s1<-predict(afit, newdata=data.frame(cbind(data[,Wnames], S=1)), type="response")
psa1<-predict(afit, newdata=data, type="response")
# Mediator model fit among S=1; psm is the density of the observed M value
mz<-predict(glm(formula=forms$MformStratS, family="binomial", data=data[data$S==1,]), newdata=data, type="response")
psm<-(mz*data$M) + ((1-mz)*(1-data$M))
# P(S=1|W) and the marginal probability of the target population P(S=0)
ps1w<-predict(glm(formula=forms$Sform, family="binomial", data=data), newdata=data, type="response")
ps0<-mean(1-data$S)
# Intermediate-variable model, evaluated at each (A, S) combination needed
# below. NOTE(review): pzs0 and pz are computed but not used afterwards.
zfit<-glm(formula=forms$Zstarform, family="binomial", data=data)
pzs0<-predict(zfit, newdata=data.frame(cbind(data[,Wnames, drop = FALSE], A=data$A, S=0)), type="response")
pz<-predict(zfit, newdata=data, type="response")
pza1s0<-predict(zfit, newdata=data.frame(cbind(data[,Wnames, drop = FALSE], A=1, S=0)), type="response")
pza0s0<-predict(zfit, newdata=data.frame(cbind(data[,Wnames, drop=FALSE], A=0, S=0)), type="response")
pza1s1<-predict(zfit, newdata=data.frame(cbind(data[,Wnames, drop=FALSE], A=1, S=1)), type="response")
pza0s1<-predict(zfit, newdata=data.frame(cbind(data[,Wnames, drop=FALSE], A=0, S=1)), type="response")
# P(Z=1|W,S=1) marginalized over the A propensity
pzs1<-pza1s1*psa1s1 + pza0s1*(1-psa1s1)
# Clever covariates for targeting Q_Y, one per counterfactual (a, g*_a).
# Under iv=TRUE the exclusion restriction removes the A indicator and the
# A propensity from the weight; otherwise both are retained.
if(iv==TRUE){
ha1gma1<-(((data$M*gma1 + (1-data$M)*(1-gma1))* ( (data$Z*pza1s0) + ((1-data$Z)*(1-pza1s0)) ) *(1-ps1w)) / (psm* ( (data$Z*pzs1) + ((1-data$Z)*(1-pzs1)) )*ps1w*ps0))*I(data$S==1)*data$weights
ha1gma0<-(((data$M*gma0 + (1-data$M)*(1-gma0))* ( (data$Z*pza1s0) + ((1-data$Z)*(1-pza1s0)) ) *(1-ps1w)) / (psm* ( (data$Z*pzs1) + ((1-data$Z)*(1-pzs1)) )*ps1w*ps0))*I(data$S==1 )*data$weights
ha0gma0<-(((data$M*gma0 + (1-data$M)*(1-gma0))* ( (data$Z*pza0s0) + ((1-data$Z)*(1-pza0s0)) ) *(1-ps1w)) / (psm* ( (data$Z*pzs1) + ((1-data$Z)*(1-pzs1)) )*ps1w*ps0))*I(data$S==1 )*data$weights
}
else{
ha1gma1<-(((data$M*gma1 + (1-data$M)*(1-gma1))* ( (data$Z*pza1s0) + ((1-data$Z)*(1-pza1s0)) ) *(1-ps1w)) / (psm* ( (data$Z*pzs1) + ((1-data$Z)*(1-pzs1)) ) * psa1s1*ps1w*ps0))*I(data$S==1 & data$A==1)*data$weights
ha1gma0<-(((data$M*gma0 + (1-data$M)*(1-gma0))* ( (data$Z*pza1s0) + ((1-data$Z)*(1-pza1s0)) ) *(1-ps1w)) / (psm* ( (data$Z*pzs1) + ((1-data$Z)*(1-pzs1)) ) * psa1s1*ps1w*ps0))*I(data$S==1 & data$A==1)*data$weights
ha0gma0<-(((data$M*gma0 + (1-data$M)*(1-gma0))* ( (data$Z*pza0s0) + ((1-data$Z)*(1-pza0s0)) ) *(1-ps1w)) / (psm* ( (data$Z*pzs1) + ((1-data$Z)*(1-pzs1)) ) * (1-psa1s1)*ps1w*ps0))*I(data$S==1 & data$A==0)*data$weights
}
#target Q_Y: one intercept-only logistic fluctuation per counterfactual,
#with the clever covariate as weight and the initial fit as offset
#for E(Y_{1,gma0})
epsilonma1g0<-coef(glm(Y ~ 1 , weights=ha1gma0, offset=(qlogis(qyinit[,1])), family="quasibinomial", data=data))
qystara1gma0<-cbind(plogis(qlogis(qyinit[,2]) + epsilonma1g0), plogis(qlogis(qyinit[,3]) + epsilonma1g0))
#for E(Y_{1,gma1})
epsilonma1g1<-coef(glm(Y ~ 1 , weights=ha1gma1, offset=(qlogis(qyinit[,1])), family="quasibinomial", data=data))
qystara1gma1<-cbind(plogis(qlogis(qyinit[,2]) + epsilonma1g1), plogis(qlogis(qyinit[,3]) + epsilonma1g1))
#for E(Y_{0,gma0})
epsilonma0g0<-coef(glm(Y ~ 1 , weights=ha0gma0, offset=(qlogis(qyinit[,1])), family="quasibinomial", data=data))
qystara0gma0<-cbind(plogis(qlogis(qyinit[,2]) + epsilonma0g0), plogis(qlogis(qyinit[,3]) + epsilonma0g0))
#estimate Q_M: integrate targeted Q_Y over the stochastic draws of M
data$Qma1g0<-qystara1gma0[,1]*(1-gma0) + qystara1gma0[,2]*gma0
data$Qma1g1<-qystara1gma1[,1]*(1-gma1) + qystara1gma1[,2]*gma1
data$Qma0g0<-qystara0gma0[,1]*(1-gma0) + qystara0gma0[,2]*gma0
#estimate Q_Z: regress the Q_M pseudo-outcomes on QZform, within A strata
Qzfita1g0<-glm(formula=paste("Qma1g0", forms$QZform, sep="~"), data=data[data$A==1,], family="quasibinomial")
Qzfita1g1<-glm(formula=paste("Qma1g1", forms$QZform, sep="~"), data=data[data$A==1,], family="quasibinomial")
Qzfita0g0<-glm(formula=paste("Qma0g0", forms$QZform, sep="~"), data=data[data$A==0,], family="quasibinomial")
Qza1g0<-predict(Qzfita1g0, type="response", newdata=data)
Qza1g1<-predict(Qzfita1g1, type="response", newdata=data)
Qza0g0<-predict(Qzfita0g0, type="response", newdata=data)
#update Q_Z
#Note: only need to do the update step if A is nonrandom
# Clever covariates for the Q_Z fluctuation target the S=0 population
ha1<-(I(data$S==0 & data$A==1)/(psa1*ps0))*data$weights
ha0<-(I(data$S==0 & data$A==0)/((1-psa1)*ps0))*data$weights
epsilonza1g0<-coef(glm(Qma1g0~ 1 , data=data, weights=ha1, offset=qlogis(Qza1g0), family="quasibinomial"))
epsilonza1g1<-coef(glm(Qma1g1~ 1 , data=data, weights=ha1, offset=qlogis(Qza1g1), family="quasibinomial"))
epsilonza0g0<-coef(glm(Qma0g0~ 1 , data=data, weights=ha0, offset=qlogis(Qza0g0), family="quasibinomial"))
Qzupa1g0<-plogis(qlogis(Qza1g0) + epsilonza1g0)
Qzupa1g1<-plogis(qlogis(Qza1g1) + epsilonza1g1)
Qzupa0g0<-plogis(qlogis(Qza0g0) + epsilonza0g0)
#estimate psi: weighted means of the updated Q_Z over the target population
tmlea1m0<-sum(Qzupa1g0*data$weights*I(data$S==0))/sum(data$weights[data$S==0])
tmlea1m1<-sum(Qzupa1g1*data$weights*I(data$S==0))/sum(data$weights[data$S==0])
tmlea0m0<-sum(Qzupa0g0*data$weights*I(data$S==0))/sum(data$weights[data$S==0])
#estimands: direct effect contrasts a, indirect effect contrasts g*
nde<-tmlea1m0-tmlea0m0
nie<-tmlea1m1-tmlea1m0
scaling <- sum(data$S==0)/sum(data$weights[data$S==0])
#EIC: efficient influence curve components for the Y, Z and W parts of
#each counterfactual mean (subscripts: a then a* of the g* distribution)
D_Y_11 <- scaling*ha1gma1*(data$Y - (qystara1gma1[,1]*(1-data$M) + qystara1gma1[,2]*data$M))
D_Y_10 <- scaling*ha1gma0*(data$Y - (qystara1gma0[,1]*(1-data$M) + qystara1gma0[,2]*data$M))
D_Y_00 <- scaling*ha0gma0*(data$Y - (qystara0gma0[,1]*(1-data$M) + qystara0gma0[,2]*data$M))
D_Z_11 <- scaling*ha1*(data$Qma1g1 - Qzupa1g1)
D_Z_10 <- scaling*ha1*(data$Qma1g0 - Qzupa1g0)
D_Z_00 <- scaling*ha0*(data$Qma0g0 - Qzupa0g0)
D_W_11 <- (Qzupa1g1*data$weights - tmlea1m1*(sum(data$weights[data$S==0])/nrow(data[data$S==0,]))) * I(data$S==0)/ps0
D_W_10 <- (Qzupa1g0*data$weights - tmlea1m0*(sum(data$weights[data$S==0])/nrow(data[data$S==0,]))) * I(data$S==0)/ps0
D_W_00 <- (Qzupa0g0*data$weights - tmlea0m0*(sum(data$weights[data$S==0])/nrow(data[data$S==0,]))) * I(data$S==0)/ps0
nde_eic<- (D_Y_10 + D_Z_10 + D_W_10) - (D_Y_00 + D_Z_00 + D_W_00)
nie_eic<- (D_Y_11 + D_Z_11 + D_W_11) - (D_Y_10 + D_Z_10 + D_W_10)
#sample variance of the EIC gives the variance of the estimator
ndevar<-var(nde_eic)/nrow(data)
nievar<-var(nie_eic)/nrow(data)
return(list("nde"=nde, "nie"=nie, "ndevar"=ndevar, "nievar"=nievar))
}
| /R/transport_sdesie_function.R | no_license | kararudolph/transport | R | false | false | 8,836 | r | #this function uses TMLE to estimate the transported stochastic direct and indirect effect
#for observed data O=(S,W,A,Z,M,Y), where
#S=1 is the source population and S=0 is the target population
#W are covariates
#A is an instrumental variable
#Z is an intermediate variable that is affected by instrument A
#M is a mediator variable and a function of W, Z under Model 1
# and a function of W, Z, A under Model 2
#Y is an outcome variable and a function of W, Z, M under Model 1
# and a function of W, Z, M, A under Model 2
# Model 1 includes the exclusion restriction assumptions (there is no direct effect of A on M or of A on Y)
# Model 2 allows the exclusion restriction not to hold
get_gstarM = function(data, forms, Wnames, pooled, gstar_S) {
dataZ1 = dataZ0 = dataA1 = dataA0 = data
# Define the parameter as to the mechanism used for M and Z
dataZ1$Z = 1
dataZ0$Z = 0
dataA1$A = 1
dataA0$A = 0
if (!pooled) {
Mstarfit = glm(formula=forms$MformStratS, data=data[data$S==gstar_S, ], family = "binomial")
Zstarfit = glm(formula=forms$ZformStratS, data=data[data$S==gstar_S, ], family = "binomial")
predMz1 = predict(Mstarfit, newdata = dataZ1, type = 'response')
predMz0 = predict(Mstarfit, newdata = dataZ0, type = 'response')
predZa0 = predict(Zstarfit, newdata = dataA0, type = 'response')
predZa1 = predict(Zstarfit, newdata = dataA1, type = 'response')
} else {
Mstarfit = glm(formula=forms$Mstarform, data=data, family = "binomial")
Zstarfit = glm(formula=forms$Zstarform, data=data, family = "binomial")
dataZ1$S = gstar_S
dataZ0$S = gstar_S
dataA1$S = gstar_S
dataA0$S = gstar_S
predMz1 = predict(Mstarfit, newdata = dataZ1, type = 'response')
predMz0 = predict(Mstarfit, newdata = dataZ0, type = 'response')
predZa0 = predict(Zstarfit, newdata = dataA0, type = 'response')
predZa1 = predict(Zstarfit, newdata = dataA1, type = 'response')
}
gstarM_astar0 = predMz1*predZa0 + predMz0*(1 - predZa0)
gstarM_astar1 = predMz1*predZa1 + predMz0*(1 - predZa1)
return(list(gstarM_astar1 = gstarM_astar1, gstarM_astar0 = gstarM_astar0))
}
transportmedtmle<-function(data, forms, Wnames, iv, gma1, gma0){
#get inital fit Q_Y
yfit<-glm(formula=forms$Yform, family="binomial", data=data[data$S==1,])
qyinit<-cbind(predict(yfit, newdata=data, type="response"),
predict(yfit, newdata=data.frame(cbind(data[,c(Wnames, "A", "Z")], M=0)), type="response"),
predict(yfit, newdata=data.frame(cbind(data[,c(Wnames, "A", "Z")], M=1)), type="response"))
#estimate weights for targeting
afit<-glm(formula=forms$Aform, family="binomial", data=data)
psa1s1<-predict(afit, newdata=data.frame(cbind(data[,Wnames], S=1)), type="response")
psa1<-predict(afit, newdata=data, type="response")
mz<-predict(glm(formula=forms$MformStratS, family="binomial", data=data[data$S==1,]), newdata=data, type="response")
psm<-(mz*data$M) + ((1-mz)*(1-data$M))
ps1w<-predict(glm(formula=forms$Sform, family="binomial", data=data), newdata=data, type="response")
ps0<-mean(1-data$S)
zfit<-glm(formula=forms$Zstarform, family="binomial", data=data)
pzs0<-predict(zfit, newdata=data.frame(cbind(data[,Wnames, drop = FALSE], A=data$A, S=0)), type="response")
pz<-predict(zfit, newdata=data, type="response")
pza1s0<-predict(zfit, newdata=data.frame(cbind(data[,Wnames, drop = FALSE], A=1, S=0)), type="response")
pza0s0<-predict(zfit, newdata=data.frame(cbind(data[,Wnames, drop=FALSE], A=0, S=0)), type="response")
pza1s1<-predict(zfit, newdata=data.frame(cbind(data[,Wnames, drop=FALSE], A=1, S=1)), type="response")
pza0s1<-predict(zfit, newdata=data.frame(cbind(data[,Wnames, drop=FALSE], A=0, S=1)), type="response")
pzs1<-pza1s1*psa1s1 + pza0s1*(1-psa1s1)
if(iv==TRUE){
ha1gma1<-(((data$M*gma1 + (1-data$M)*(1-gma1))* ( (data$Z*pza1s0) + ((1-data$Z)*(1-pza1s0)) ) *(1-ps1w)) / (psm* ( (data$Z*pzs1) + ((1-data$Z)*(1-pzs1)) )*ps1w*ps0))*I(data$S==1)*data$weights
ha1gma0<-(((data$M*gma0 + (1-data$M)*(1-gma0))* ( (data$Z*pza1s0) + ((1-data$Z)*(1-pza1s0)) ) *(1-ps1w)) / (psm* ( (data$Z*pzs1) + ((1-data$Z)*(1-pzs1)) )*ps1w*ps0))*I(data$S==1 )*data$weights
ha0gma0<-(((data$M*gma0 + (1-data$M)*(1-gma0))* ( (data$Z*pza0s0) + ((1-data$Z)*(1-pza0s0)) ) *(1-ps1w)) / (psm* ( (data$Z*pzs1) + ((1-data$Z)*(1-pzs1)) )*ps1w*ps0))*I(data$S==1 )*data$weights
}
else{
ha1gma1<-(((data$M*gma1 + (1-data$M)*(1-gma1))* ( (data$Z*pza1s0) + ((1-data$Z)*(1-pza1s0)) ) *(1-ps1w)) / (psm* ( (data$Z*pzs1) + ((1-data$Z)*(1-pzs1)) ) * psa1s1*ps1w*ps0))*I(data$S==1 & data$A==1)*data$weights
ha1gma0<-(((data$M*gma0 + (1-data$M)*(1-gma0))* ( (data$Z*pza1s0) + ((1-data$Z)*(1-pza1s0)) ) *(1-ps1w)) / (psm* ( (data$Z*pzs1) + ((1-data$Z)*(1-pzs1)) ) * psa1s1*ps1w*ps0))*I(data$S==1 & data$A==1)*data$weights
ha0gma0<-(((data$M*gma0 + (1-data$M)*(1-gma0))* ( (data$Z*pza0s0) + ((1-data$Z)*(1-pza0s0)) ) *(1-ps1w)) / (psm* ( (data$Z*pzs1) + ((1-data$Z)*(1-pzs1)) ) * (1-psa1s1)*ps1w*ps0))*I(data$S==1 & data$A==0)*data$weights
}
#target Q_Y
#for E(Y_{1,gma0})
epsilonma1g0<-coef(glm(Y ~ 1 , weights=ha1gma0, offset=(qlogis(qyinit[,1])), family="quasibinomial", data=data))
qystara1gma0<-cbind(plogis(qlogis(qyinit[,2]) + epsilonma1g0), plogis(qlogis(qyinit[,3]) + epsilonma1g0))
#for E(Y_{1,gma1})
epsilonma1g1<-coef(glm(Y ~ 1 , weights=ha1gma1, offset=(qlogis(qyinit[,1])), family="quasibinomial", data=data))
qystara1gma1<-cbind(plogis(qlogis(qyinit[,2]) + epsilonma1g1), plogis(qlogis(qyinit[,3]) + epsilonma1g1))
#for E(Y_{0,gma0})
epsilonma0g0<-coef(glm(Y ~ 1 , weights=ha0gma0, offset=(qlogis(qyinit[,1])), family="quasibinomial", data=data))
qystara0gma0<-cbind(plogis(qlogis(qyinit[,2]) + epsilonma0g0), plogis(qlogis(qyinit[,3]) + epsilonma0g0))
#estimate Q_M
data$Qma1g0<-qystara1gma0[,1]*(1-gma0) + qystara1gma0[,2]*gma0
data$Qma1g1<-qystara1gma1[,1]*(1-gma1) + qystara1gma1[,2]*gma1
data$Qma0g0<-qystara0gma0[,1]*(1-gma0) + qystara0gma0[,2]*gma0
#estimate Q_Z
Qzfita1g0<-glm(formula=paste("Qma1g0", forms$QZform, sep="~"), data=data[data$A==1,], family="quasibinomial")
Qzfita1g1<-glm(formula=paste("Qma1g1", forms$QZform, sep="~"), data=data[data$A==1,], family="quasibinomial")
Qzfita0g0<-glm(formula=paste("Qma0g0", forms$QZform, sep="~"), data=data[data$A==0,], family="quasibinomial")
Qza1g0<-predict(Qzfita1g0, type="response", newdata=data)
Qza1g1<-predict(Qzfita1g1, type="response", newdata=data)
Qza0g0<-predict(Qzfita0g0, type="response", newdata=data)
#update Q_Z
#Note: only need to do the update step if A is nonrandom
ha1<-(I(data$S==0 & data$A==1)/(psa1*ps0))*data$weights
ha0<-(I(data$S==0 & data$A==0)/((1-psa1)*ps0))*data$weights
epsilonza1g0<-coef(glm(Qma1g0~ 1 , data=data, weights=ha1, offset=qlogis(Qza1g0), family="quasibinomial"))
epsilonza1g1<-coef(glm(Qma1g1~ 1 , data=data, weights=ha1, offset=qlogis(Qza1g1), family="quasibinomial"))
epsilonza0g0<-coef(glm(Qma0g0~ 1 , data=data, weights=ha0, offset=qlogis(Qza0g0), family="quasibinomial"))
Qzupa1g0<-plogis(qlogis(Qza1g0) + epsilonza1g0)
Qzupa1g1<-plogis(qlogis(Qza1g1) + epsilonza1g1)
Qzupa0g0<-plogis(qlogis(Qza0g0) + epsilonza0g0)
#estimate psi
tmlea1m0<-sum(Qzupa1g0*data$weights*I(data$S==0))/sum(data$weights[data$S==0])
tmlea1m1<-sum(Qzupa1g1*data$weights*I(data$S==0))/sum(data$weights[data$S==0])
tmlea0m0<-sum(Qzupa0g0*data$weights*I(data$S==0))/sum(data$weights[data$S==0])
#estimands
nde<-tmlea1m0-tmlea0m0
nie<-tmlea1m1-tmlea1m0
scaling <- sum(data$S==0)/sum(data$weights[data$S==0])
#EIC
D_Y_11 <- scaling*ha1gma1*(data$Y - (qystara1gma1[,1]*(1-data$M) + qystara1gma1[,2]*data$M))
D_Y_10 <- scaling*ha1gma0*(data$Y - (qystara1gma0[,1]*(1-data$M) + qystara1gma0[,2]*data$M))
D_Y_00 <- scaling*ha0gma0*(data$Y - (qystara0gma0[,1]*(1-data$M) + qystara0gma0[,2]*data$M))
D_Z_11 <- scaling*ha1*(data$Qma1g1 - Qzupa1g1)
D_Z_10 <- scaling*ha1*(data$Qma1g0 - Qzupa1g0)
D_Z_00 <- scaling*ha0*(data$Qma0g0 - Qzupa0g0)
D_W_11 <- (Qzupa1g1*data$weights - tmlea1m1*(sum(data$weights[data$S==0])/nrow(data[data$S==0,]))) * I(data$S==0)/ps0
D_W_10 <- (Qzupa1g0*data$weights - tmlea1m0*(sum(data$weights[data$S==0])/nrow(data[data$S==0,]))) * I(data$S==0)/ps0
D_W_00 <- (Qzupa0g0*data$weights - tmlea0m0*(sum(data$weights[data$S==0])/nrow(data[data$S==0,]))) * I(data$S==0)/ps0
nde_eic<- (D_Y_10 + D_Z_10 + D_W_10) - (D_Y_00 + D_Z_00 + D_W_00)
nie_eic<- (D_Y_11 + D_Z_11 + D_W_11) - (D_Y_10 + D_Z_10 + D_W_10)
#sample variance
ndevar<-var(nde_eic)/nrow(data)
nievar<-var(nie_eic)/nrow(data)
return(list("nde"=nde, "nie"=nie, "ndevar"=ndevar, "nievar"=nievar))
}
|
#' Simulates catch at age and catch history data for testing SRA methods
#'
#' @description Catch at age and catch simulator.
#' @param OM An operating model object with M, growth, stock-recruitment and maturity parameters specified.
#' @param qmult Fraction of natural mortality rate that is mean fishing mortality (Fishing catchability multiplier)
#' @param CAApatchy The fraction of years that have catch at age data
#' @param Cpatchy The fraction of years that have catch data
#' @param Ipatchy The fraction of years that have index data
#' @param MLpatchy The fraction of years that have mean length data
#' @param nCAA The number of independent annual catch at age observations (same among all years)
#' @param nL The number of independent annual catch at length observations (same among all years) for calculating mean length
#' @param sigmaE Level of simulated interannual variability in effort (F) expressed as a lognormal SD
#' @param sigmaI Observation error in relative abundance indices expressed as a lognormal SD
#' @return A list: Chist = historical catch series,Recdevs = historical recruitment deviations (mean = 1), CAA = catch at age matrix, N = numbers at age matrix, SSB = annual spawning biomass, FM = Fishing mortality rate at age matrix, M = natural mortality rate \code{classy}
#' @author T. Carruthers (Canadian DFO grant)
#' @export SRAsim
#' @examples
#' out<-SRAsim(testOM)
SRAsim<-function(OM,qmult=0.5,CAApatchy=0.4,Cpatchy=1,Ipatchy=0.4,MLpatchy=0.4,nCAA=100,nL=200,sigmaE=0.25,sigmaI=0.1){
# --- Life history and fishery parameters drawn from the operating model ---
maxage<-OM@maxage
nyears<-OM@nyears
M<-mean(OM@M) # Natural mortality rate
h<-mean(OM@h) # Steepness
Linf<-mean(OM@Linf)
K<-mean(OM@K) # Maximum growth rate
t0<-mean(OM@t0) # Theoretical length at age zero
lenM <- mean(OM@L50)
L50_90<-mean(OM@L50_95)
len95 <- lenM+L50_90
ageM <- -((log(1-lenM/Linf))/K) + t0 # calculate ageM from L50 and growth parameters (non-time-varying)
age95 <- -((log(1-len95/Linf))/K) + t0
# Logistic maturity at age; selectivity is assumed equal to maturity
Mat_age <- 1/(1 + exp(-log(19) * (((1:maxage) - ageM)/(age95 - ageM))))
Len_age<-Linf*(1-exp(-K*((1:maxage)-t0)))
Wt_age<-OM@a*Len_age^OM@b
M_age<-rep(M,maxage)
sel<-Mat_age
# --- Effort / F history: sinusoidal trend with lognormal interannual noise ---
eff<-(sin((nyears:1)/18)+1)*exp(rnorm(nyears,1,sigmaE))
apFM<-eff/mean(eff)*qmult*M # apical F scaled so that mean F = qmult * M
N<-CAA<-array(NA,c(nyears,maxage))
Chist<-SSB<-ML<-rep(NA,nyears)
FM<-array(rep(apFM,maxage)*rep(sel,each=nyears),c(nyears,maxage))
Z<-array(FM+rep(M_age,each=nyears),c(nyears,maxage))
R0<-10000 # arbitrary unfished recruitment (dynamics are scale-free)
sigma<-mean(OM@Perr)
# Recruitment deviations normalized to mean 1 (trlnorm is presumably a
# truncated lognormal generator from the host package -- not base R)
Recdevs<-trlnorm(nyears-1+maxage,1,sigma)
Recdevs<-Recdevs/mean(Recdevs)
N0<-R0*exp(-M*(0:(maxage-1)))
SSB0<-sum(N0*Mat_age*Wt_age)
SSBpR<-SSB0/R0 # unfished spawning biomass per recruit
N1<-N0*Recdevs[maxage:1]
# --- Annual population dynamics loop ---
for(y in 1:nyears){
if(y==1){
N[1,]<-N1
}else{
N[y,]<-N[y-1,]
}
SSB[y]<-sum(N[y,]*Mat_age*Wt_age)
# Catch at age taken after half a year of natural mortality
CAA[y,]<-N[y,]*exp(-M_age/2)*(1-exp(-FM[y,]))
ML[y]<-mean(sample(rep(Len_age,CAA[y,]),nL)) # mean length of nL sampled fish
Chist[y]<-sum(CAA[y,]*Wt_age)
# Total mortality, ageing, then Beverton-Holt recruitment with deviations
N[y,]<-N[y,]*exp(-Z[y,])
N[y,2:maxage]<-N[y,1:(maxage-1)]
N[y,1]<-Recdevs[maxage+y]*(0.8 * R0 * h * SSB[y])/
(0.2*SSBpR*R0*(1-h)+(h-0.2)*SSB[y])
}
# Multinomial observation error on the annual catch composition
for(y in 1:nyears)CAA[y,]<-rmultinom(1,nCAA,CAA[y,])
# Remove years without data.
# FIX: CAA was previously thinned using Cpatchy while CAApatchy was ignored,
# and the catch series was never thinned; each series now uses its own
# documented argument.
CAA[sample(1:nyears,size=nyears*(1-CAApatchy),replace=FALSE),]<-NA
Chist[sample(1:nyears,size=nyears*(1-Cpatchy),replace=FALSE)]<-NA
# --- Diagnostic plots ---
par(mfrow=c(4,2),mai=c(0.8,0.8,0.05,0.05))
plot(SSB,type="l",xlab="Year",ylim=c(0,max(SSB)))
plot(Chist,type="l",xlab="Year",ylim=c(0,max(Chist,na.rm=TRUE)))
plot(apFM,type="l",xlab="Year",ylab="ApicalF")
plot(sel,type="l",xlab="Age",ylab="Selectivity")
plot((-maxage+2):nyears,Recdevs,xlab="Year")
abline(v=0.5,col='blue')
# Bubble plot of the (possibly patchy) catch-at-age observations
plot(c(1,nyears),c(1,maxage),col='white',xlab="Year",ylab="Age")
legend("top",legend="Catch composition data",bty='n')
points(rep(1:nyears,maxage),rep(1:maxage,each=nyears),cex=CAA^0.5/max(CAA^0.5,na.rm=TRUE)*2.5)
# Relative abundance index: lognormal error around relative SSB, thinned
# to the Ipatchy fraction of years, then renormalized to mean 1
Ind<-trlnorm(nyears,1,sigmaI)*(SSB/SSB[1])
Ind[sample(1:nyears,size=nyears*(1-Ipatchy),replace=FALSE)]<-NA
Ind<-Ind/mean(Ind,na.rm=TRUE)
ML[sample(1:nyears,size=nyears*(1-MLpatchy),replace=FALSE)]<-NA
plot(1:nyears,Ind,xlab="Year",ylim=c(0,max(Ind,na.rm=TRUE)))
plot(1:nyears,ML,xlab="Year")
return(list(Chist=Chist,Recdevs=Recdevs,CAA=CAA,Ind=Ind,ML=ML,N=N,SSB=SSB,FM=FM,M=M,SSB0=SSB0,sel=sel))
}
# helper functions
# Thin R-level adapter around the compiled LSRA objective LSRA_opt_cpp.
# -mode- selects which element of the returned list to extract
# (per LSRA_cpp usage: 1 = objective, other modes expose additional outputs).
LSRA_cppWrapper <- function(param, FF_a, Chist, M_a, Mat_age_a, Wt_age_a, sel_a,
                            Recdevs_a, h_a, Umax = 0.5, mode = 1) {
  out <- LSRA_opt_cpp(param, FF_a, Chist, M_a, Mat_age_a, Wt_age_a, sel_a,
                      Recdevs_a, h_a, Umax)
  out[[mode]]
}
# Solve for log(R0) for simulation -x- by one-dimensional minimisation of
# the SRA objective (mode 1 of LSRA_cppWrapper). All matrix arguments are
# indexed by simulation in the first dimension. Returns the optimal log(R0).
LSRA_cpp <- function(x, FF, Chist_arr, M, Mat_age, Wt_age, sel, Recdevs, h) {
  n_age <- ncol(Mat_age)
  # Unfished spawning biomass per recruit for this simulation
  surv <- exp(-M[x] * (0:(n_age - 1)))
  ssb_per_rec <- sum(surv * Mat_age[x, ] * Wt_age[x, ])
  # Bracket SSB0 between 5% and 100x of the summed historical catch,
  # then convert to an R0 search interval
  ssb0_bounds <- sum(Chist_arr[x, ]) * c(0.05, 100)
  r0_bounds <- ssb0_bounds / ssb_per_rec
  # Modes 1:obj 2:Fpred 3:depletion 4:R0 5:Ffit
  fit <- optimize(
    LSRA_cppWrapper,
    interval = log(r0_bounds),
    FF_a = FF[x],
    Chist = Chist_arr[x, ],
    M_a = M[x],
    Mat_age_a = Mat_age[x, ],
    Wt_age_a = Wt_age[x, ],
    sel_a = sel[x, ],
    Recdevs_a = Recdevs[x, ],
    h_a = h[x],
    mode = 1
  )
  fit$minimum
}
#' Stochastic SRA construction of operating models
#'
#' @description Specify an operating model, using catch composition data and a historical catch series.
#' Returns and operating model with depletion (D), selectivity parameters (L5, LFS) and effort trajectory (Effyears, EffLower, EffUpper) filled.
#' Modified version using cpp code.
#' @param OM An operating model object with M, growth, stock-recruitment and maturity parameters specified.
#' @param CAA A matrix nyears (rows) by nages (columns) of catch at age (age 1 to maxage in length)
#' @param Chist A vector of historical catch observations (nyears long) going back to unfished conditions
#' @param Ind A vector of historical index observations (nyears long, may be patchy with NAs) going back to unfished conditions.
#' @param Cobs A numeric value representing catch observation error as a log normal sd
#' @param sigmaR A numeric value representing the prior standard deviation of log space recruitment deviations
#' @param Umax A numeric value representing the maximum harvest rate for any age class (rejection of sims where this occurs)
#' @param nsim The number desired draws of parameters / effort trajectories
#' @param proyears The number of projected MSE years
#' @param Jump_fac A multiplier of the jumping distribution variance to increase acceptance (lower Jump_fac) or decrease acceptance rate (higher Jump_fac)
#' @param nits The number of MCMC iterations
#' @param burnin The number of initial MCMC iterations to discard
#' @param thin The interval over which MCMC samples are extracted for use in graphing / statistics
#' @param ESS Effective sample size - the weighting of the catch at age data
#' @param ploty Do you want to see diagnostics plotted?
#' @param nplot how many MCMC samples should be plotted in convergence plots?
#' @param SRAdir A directory where the SRA diagnostics / fit are stored
#' @return A list with three positions. Position 1 is the filled OM object, position 2 is the custompars data.frame that may be submitted as an argument to runMSE() and position 3 is the matrix of effort histories `[nyears x nsim]` vector of objects of class\code{classy}
#' @author T. Carruthers (Canadian DFO grant)
#' @references Walters, C.J., Martell, S.J.D., Korman, J. 2006. A stochastic approach to stock reduction analysis. Can. J. Fish. Aqua. Sci. 63:212-213.
#' @examples
#' \dontrun{
#' setup()
#' sim<-SRAsim(testOM,patchy=0.8)
#' CAA<-sim$CAA
#' Chist<-sim$Chist
#' testOM<-StochasticSRAcpp(testOM,CAA,Chist,nsim=30,nits=1000)
#' runMSE(testOM)
#' }
StochasticSRAcpp <-function(OM,CAA,Chist,Ind,Cobs=0.1,sigmaR=0.5,Umax=0.9,nsim=48,proyears=50,
Jump_fac=1,nits=20000,
burnin=1000,thin=50,ESS=300,ploty=T,nplot=6,SRAdir=NA){
# Stochastic SRA conditioning using the compiled MCMC kernel LSRA_MCMC_sim().
# Per simulation it samples log(R0), a logistic selectivity (inflection and
# slope on the log scale) and log recruitment deviations, then writes the
# posterior draws into OM@cpars and returns the updated operating model.
# NOTE(review): the Ind argument is only referenced in the commented-out
# LSRA_MCMC_sim() calls below, so the index data are currently unused here.
# --- Input validation and dimensioning -------------------------------------
OM <- updateMSE(OM) # Check that all required slots in OM object contain values
nyears<-length(Chist)
if(class(Chist)=="matrix")nyears<-nrow(Chist)
maxage<-OM@maxage
if (OM@nyears != nyears) {
message("OM@nyears being updated to length Chist: ", nyears)
OM@nyears <- nyears
}
if (dim(CAA)[1] != nyears) stop("Number of CAA rows (", dim(CAA)[1], ") does not equal nyears (", nyears, "). NAs are acceptable")
if (dim(CAA)[2] != OM@maxage) {
# Pad the catch-at-age matrix with zero columns up to OM@maxage
message("Number of CAA columns (", dim(CAA)[2], ") does not equal OM@maxage (", OM@maxage, ")")
message("Assuming no CAA for ages greater than ", dim(CAA)[2], ' and filling with 0s')
addages <- OM@maxage-dim(CAA)[2]
CAA2 <- matrix(0, nrow=nrow(CAA), ncol=addages)
CAA <- cbind(CAA, CAA2)
}
# Enforce a minimum burn-in of 5% of the chain length
if (burnin < 0.05*nits) burnin <- 0.05 * nits
# OM slots take precedence over the nsim / proyears arguments
if("nsim"%in%slotNames(OM))nsim<-OM@nsim
if("proyears"%in%slotNames(OM))proyears<-OM@proyears
OM@nsim<-nsim
OM@proyears<-proyears
# --- Sample biological and fleet parameters from the OM --------------------
# Sample custom parameters
SampCpars <- list() # empty list
# custom parameters exist - sample and write to list
if(length(OM@cpars)>0){
# ncparsim<-cparscheck(OM@cpars) # check each list object has the same length and if not stop and error report
SampCpars <- SampleCpars(OM@cpars, nsim)
}
# Sample Stock Parameters (warnings suppressed during sampling only)
options(warn=-1)
StockPars <- SampleStockPars(OM, nsim, nyears, proyears, SampCpars)
options(warn=1)
# Assign Stock pars to function environment (M, hs, Linf, K, t0, L50, Wt_age, ...)
for (X in 1:length(StockPars)) assign(names(StockPars)[X], StockPars[[X]])
agearr<-array(rep(1:maxage,each=nsim),c(nsim,maxage))
Wt_age <- Wt_age[,,nyears] # no time-varying growth
Mat_age<- Mat_age[,,nyears]
# Sample Fleet Parameters
options(warn=-1)
FleetPars <- SampleFleetPars(SubOM(OM, "Fleet"), Stock=StockPars, nsim,
nyears, proyears, cpars=SampCpars)
options(warn=1)
# Assign Fleet pars to function environment
for (X in 1:length(FleetPars)) assign(names(FleetPars)[X], FleetPars[[X]])
# Sample historical catch: lognormal observation error (CV = Cobs) around Chist
Chist_a<-array(trlnorm(nyears*nsim,1,Cobs)*rep(Chist,each=nsim),c(nsim,nyears)) # Historical catch
# --- Set up MCMC: bounds, initial values, jumping distribution -------------
# set up mcmc
lnR0<-lninfl<-lnslp<-array(NA,c(nsim,nits))
lnRD<-array(0,c(nsim,nyears+maxage,nits))
LHD<-array(NA,c(nsim,nits))
# Bracket log(R0) by deterministic SRA fits at high (M*4) and low (M/10) F
# (run in parallel via snowfall when a cluster is available)
if(sfIsRunning()){
R0LB<-sfSapply(1:nsim,LSRA_cpp,FF=M*4,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
R0UB<-sfSapply(1:nsim,LSRA_cpp,FF=M/10,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
}else{
R0LB <- sapply(1:nsim,LSRA_cpp,FF=M*4,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
R0UB <- sapply(1:nsim,LSRA_cpp,FF=M/10,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
}
R0b <- cbind(R0LB-1,R0UB+1)
# Selectivity bounds (log scale): inflection in [0.5, maxage/2] ages
inflb<-log(c(0.5,maxage*0.5))
slpb<-log(exp(inflb)*c(0.1,2))#c(-3,3)
RDb<-c(-2,2)
# initial guesses
lnR0[,1]<-R0UB#log(apply(Chist_a,1,mean))
lninfl[,1]<-log(maxage/4)
lnslp[,1]<-log(exp(lninfl[,1])*0.2)
lnRD[,,1]<-0
# parameters: one row per simulation [lnR0, lninfl, lnslp, lnRD...]
pars <- cbind(lnR0[,1], lninfl[,1], lnslp[,1], lnRD[,,1])
npars<-ncol(pars)
# parameter store
LHstr<-array(NA,c(nsim,nits))
parstr<-array(NA,c(nsim, npars,nits))
# parameter indexes (columns of pars)
R0ind <-1
inflind <- 2
slpind <- 3
RDind <- 4:npars
# Parameter jumping (proposal SDs, scaled by Jump_fac)
JumpCV<-rep(0.05,npars) # R0
JumpCV[inflind]<-0.05
JumpCV[slpind]<-0.05
JumpCV[RDind]<-0.1*sigmaR # a function of sigmaR to provide reasonable acceptance rate
JumpCV<-JumpCV*Jump_fac
# parameter censorship (hard lower/upper bounds per simulation)
parLB<-parUB<-matrix(NA, nsim, npars)
parLB[,1]<-R0b[,1]
parLB[,2]<-inflb[1]
parLB[,3]<-slpb[1]
parLB[,4:npars]<-RDb[1]
parUB[,1]<-R0b[,2]
parUB[,2]<-inflb[2]
parUB[,3]<-slpb[2]
parUB[,4:npars]<-RDb[2]
CAAadj=sum(CAA,na.rm=T)/ESS # ESS adjustment to low sample sizes
# update<-(1:50)*(nits/50)
# Proposal-variance annealing: wide early jumps, standard after 200 iterations
adapt<-c(rep(5,100),rep(2.5,100),rep(1,nits-200))
# --- Run the compiled MCMC per simulation ----------------------------------
message("Running MCMC (may take a while!)")
if (snowfall::sfIsRunning()) {
mcmc <- snowfall::sfSapply(1:nsim, function(sim) {
#LSRA_MCMC_sim(nits=nits, pars[sim,], JumpCV, adapt, parLB[sim,], parUB[sim,], R0ind-1,
#             inflind-1, slpind-1, RDind-1, nyears, maxage, M[sim], Mat_age[sim,],
#             Wt_age[sim,], Chist_a[sim,], Ind, Umax, hs[sim], CAA, CAAadj, sigmaR, sigmaI)
LSRA_MCMC_sim(nits=nits, pars[sim,], JumpCV, adapt, parLB[sim,], parUB[sim,], R0ind-1,
inflind-1, slpind-1, RDind-1, nyears, maxage, M[sim], Mat_age[sim,],
Wt_age[sim,], Chist_a[sim,], Umax, hs[sim], CAA, CAAadj, sigmaR)
})
}else {
mcmc <- sapply(1:nsim, function(sim) {
cat(".")
# LSRA_MCMC_sim(nits=nits, pars[sim,], JumpCV, adapt, parLB[sim,], parUB[sim,], R0ind-1,
#             inflind-1, slpind-1, RDind-1, nyears, maxage, M[sim], Mat_age[sim,],
#             Wt_age[sim,], Chist_a[sim,], Ind, Umax, hs[sim], CAA, CAAadj, sigmaR, sigmaI)
LSRA_MCMC_sim(nits=nits, pars[sim,], JumpCV, adapt, parLB[sim,], parUB[sim,], R0ind-1,
inflind-1, slpind-1, RDind-1, nyears, maxage, M[sim], Mat_age[sim,],
Wt_age[sim,], Chist_a[sim,], Umax, hs[sim], CAA, CAAadj, sigmaR)
})
}
# Unpack the 7 return slots of LSRA_MCMC_sim into [nsim x ...] arrays
parstr <- aperm(array(unlist(mcmc[1,]), dim=c(npars, nits, nsim)), c(3,1,2))
CAA_pred <- aperm(array(unlist(mcmc[2,]), dim=c(nyears, maxage, nsim)), c(3,1,2))
SSB <- aperm(array(unlist(mcmc[3,]), dim=c(nyears, nsim)), c(2,1))
SSB0 <- unlist(mcmc[4,])
RD <- aperm(array(unlist(mcmc[5,]), dim=c(nyears+maxage, nsim)), c(2,1))
PredF <- aperm(array(unlist(mcmc[6,]), dim=c(nyears, nsim)), c(2,1))
sel <- aperm(array(unlist(mcmc[7,]), dim=c(maxage, nsim)), c(2,1))
# --- Diagnostic plots: chain traces (thinned) and posterior densities ------
if(!is.na(SRAdir))jpeg(paste0(SRAdir,"/SRA_convergence.jpg"),width=7,height=9,units='in',res=400)
if(ploty){
col<-rep(c("blue","red","green","orange","grey","brown","pink","yellow","dark red","dark blue","dark green"),100)
par(mfcol=c(5,2),mai=c(0.7,0.6,0.05,0.1))
pind<-(1:(nits/thin))*thin
matplot(pind,t(parstr[1:nplot,1,pind]),type='l',ylab="log R0",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[(1:nplot),2, pind]),type='l',ylab="log infl (sel)",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[(1:nplot),3,pind]),type='l',ylab="log slp (sel)",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[(1:nplot),4, pind]),type='l',ylab="recdev1",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[(1:nplot), npars, pind]),type='l',ylab="recdev2",xlab="Iteration")
abline(v=burnin,lty=2)
burn<-burnin:nits
plot(density(parstr[, 1,burn],adj=0.7),xlab="log(R0)",main="")
plot(density(parstr[, 2,burn],adj=0.7),xlab="inflection selectivity",main="")
plot(density(parstr[, 3,burn],adj=0.7),xlab="slope selectivity",main="")
plot(density(parstr[, 4, burn],adj=0.7),xlab="recdev1",main="")
plot(density(parstr[, npars, burn],adj=0.7),xlab="recdev2",main="")
}
if(!is.na(SRAdir))dev.off()
# --- Diagnostic plots: predicted SSB, depletion, F, selectivity, recdevs ---
if(!is.na(SRAdir))jpeg(paste0(SRAdir,"/SRA predictions.jpg"),width=7,height=11,units='in',res=400)
if(ploty){
par(mfrow=c(6,2),mai=c(0.65,0.6,0.02,0.1))
qq<-apply(SSB,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
matplot(t(SSB[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="SSB")
xs<-dim(SSB)[2]
plot(qq[3,],ylim=ylim,type='l',xlab="Year",ylab="SSB")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
D<-SSB/SSB0
qq<-apply(D,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
matplot(t(D[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="Depletion")
plot(qq[3,],ylim=ylim,type='l',xlab="Year",ylab="Depletion")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
qq<-apply(PredF,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
matplot(t(PredF[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="Fish. Mort.")
plot(qq[3,],ylim=ylim,type='l',xlab="Year",ylab="Fish. Mort.")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
nyears<-dim(CAA)[1]
nages<-dim(CAA)[2]
qq<-apply(sel,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
xs<-maxage
matplot(t(sel[1:nplot,]),ylim=ylim,type="l",xlab="Age",ylab="Selectivity")
plot(qq[3,],ylim=ylim,type='l',xlab="Age",ylab="Selectivity")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
RDx<-(-maxage+1):nyears
qq<-apply(RD,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
xs<-dim(RD)[2]
matplot(RDx,t(RD[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="Rec. Dev.")
plot(RDx,qq[3,],ylim=ylim,type='l',xlab="Year",ylab="Rec. Dev.")
polygon(c(RDx,RDx[xs:1]),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(RDx,RDx[xs:1]),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(RDx,qq[3,],lwd=1,col="white")
# Bubble plots of observed vs predicted (sim 1) catch-at-age composition
plot(c(1,nyears),c(1,nages),col='white',xlab="Year",ylab="Age")
legend("top",legend="Observed composition data",bty='n')
points(rep(1:nyears,nages),rep(1:nages,each=nyears),cex=CAA^0.5/max(CAA^0.5,na.rm=T)*1.5,pch=19,col=makeTransparent("dark grey",60))
CAA_pred1<-CAA_pred
CAA_pred1[CAA_pred1<0.002]<-NA
plot(c(1,dim(CAA)[1]),c(1,dim(CAA)[2]),col='white',xlab="Year",ylab="Age")
legend("top",legend="Predicted composition data (1 sim)",bty='n')
points(rep(1:nyears,nages),rep(1:nages,each=nyears),cex=CAA_pred1[1,,]^0.5/max(CAA_pred1[1,,]^0.5,na.rm=T)*1.5,pch=19,col=makeTransparent("dark grey",60))
}
if(!is.na(SRAdir))dev.off()
# --- Summarize posterior and write results into the OM ---------------------
dep<-SSB[,nyears]/SSB0
procsd<-apply(RD,1,sd,na.rm=T)
procmu <- -0.5 * (procsd)^2 # adjusted log normal mean
OM@D<-quantile(dep,c(0.05,0.95))
OM@Perr<-quantile(procsd,c(0.025,0.975))
# Lag-1 autocorrelation of recruitment deviations per simulation
getAC<-function(recdev)acf(recdev,plot=F)$acf[2,1,1]
AC<-apply(RD,1,getAC)
OM@AC<-quantile(AC,c(0.05,0.95))
# Final-iteration draws of R0 and selectivity parameters
R0 <- exp(parstr[,1,nits])
slp <- exp(parstr[,3,nits])
infl <- exp(parstr[,2,nits])
# Convert logistic selectivity (age) to length at 5% / 95% selection via VB growth
A5<--(slp*log(1/0.05-1)-infl)
A5[A5 < 0] <- 0
A95<--(slp*log(1/0.95-1)-infl)
L5<-Linf*(1-exp(-K*(A5-t0)))
L95<-Linf*(1-exp(-K*(A95-t0)))
OM@L5<-quantile(L5,c(0.05,0.95))
OM@LFS<-quantile(L95,c(0.05,0.95))
OM@nyears<-nyears
OM@EffYears<-1:OM@nyears
OM@EffLower<-apply(PredF,2,quantile,p=0.05)
OM@EffUpper<-apply(PredF,2,quantile,p=0.95)
OM@nyears<-nyears
# Recruitment deviations: historical (from SRA) + AR(1) projections
Perr<-array(NA,c(nsim,maxage+nyears+proyears-1))
Perr[,1:(nyears+maxage-1)]<-log(RD[,2:(maxage+nyears)])
Perr[,(nyears+maxage):(nyears+maxage+proyears-1)]<-matrix(rnorm(nsim*(proyears),rep(procmu,proyears),rep(procsd,proyears)),nrow=nsim)
for (y in (maxage+nyears):(nyears + proyears+maxage-1)) Perr[, y] <- AC * Perr[, y - 1] + Perr[, y] * (1 - AC * AC)^0.5
Perr<-exp(Perr)
PredF<-PredF/apply(PredF,1,mean) # Find should be mean 1 so qs optimizers are standardized
Wt_age <- array(Wt_age, dim=c(dim=c(nsim, maxage, nyears+proyears)))
Len_age <- array(Len_age, dim=c(nsim, maxage, nyears+proyears))
Marray <- matrix(M, nrow=nsim, ncol=proyears+nyears)
# NOTE(review): depletion is stored as 'dep' here but StochasticSRA() stores
# it as 'D' in cpars - confirm which name downstream code expects.
OM@cpars<-list(dep=dep,M=M,procsd=procsd,AC=AC,hs=hs,Linf=Linf,
Wt_age=Wt_age, Len_age=Len_age, Marray=Marray,
K=K,t0=t0,L50=L50,
L5=L5,LFS=L95,Find=PredF,
V=array(sel,c(nsim,maxage,nyears)),Perr=Perr,R0=R0,
SSB=SSB,SSB0=SSB0,RD=RD) # not valid for runMSE code but required
OM
}
#' Stochastic SRA construction of operating models
#'
#' @description Specify an operating model, using catch composition data and a historical catch series. Returns an operating model with depletion (D), selectivity parameters (L5, LFS) and effort trajectory (Effyears, EffLower, EffUpper) filled.
#' @param OM An operating model object with M, growth, stock-recruitment and maturity parameters specified.
#' @param CAA A matrix nyears (rows) by nages (columns) of catch at age (age 1 to maxage in length)
#' @param Chist A vector of historical catch observations (nyears long) going back to unfished conditions
#' @param Ind A vector of historical abundance index observations (assumed proportional to SSB)
#' @param ML A vector of historical mean length (in catch) observations
#' @param CAL A matrix of nyears (row) by n length bins (columns) of catch at length samples
#' @param mulen A vector mean length by length bin, a vector the same as the number of columns of CAL
#' @param wts A vector of relative weights for the likelihood functions of CAA, Chist, Ind, ML and CAL
#' @param Jump_fac A multiplier of the jumping distribution variance to increase acceptance (lower Jump_fac) or decrease acceptance rate (higher Jump_fac)
#' @param nits The number of MCMC iterations
#' @param burnin The number of initial MCMC iterations to discard
#' @param thin The interval over which MCMC samples are extracted for use in graphing / statistics
#' @param ESS Effective sample size - the weighting of the catch at age data
#' @param MLsd The lognormal sd of the mean length observations
#' @param ploty Do you want to see diagnostics plotted?
#' @param nplot how many MCMC samples should be plotted in convergence plots?
#' @param SRAdir A directory where the SRA diagnostics / fit are stored
#' @return An object of class \code{OM}: the input operating model with depletion, selectivity, effort and recruitment information filled from the SRA posterior (stored in \code{OM@cpars}).
#' @author T. Carruthers (Canadian DFO grant)
#' @references Walters, C.J., Martell, S.J.D., Korman, J. 2006. A stochastic approach to stock reduction analysis. Can. J. Fish. Aqua. Sci. 63:212-213.
#' @export StochasticSRA
#' @examples
#' \dontrun{
#' setup()
#' sim<-SRAsim(testOM,patchy=0.8)
#' CAA<-sim$CAA
#' Chist<-sim$Chist
#' testOM<-StochasticSRA(testOM,CAA,Chist,nsim=30,nits=1000)
#' runMSE(testOM)
#' }
StochasticSRA<-function(OM,CAA,Chist,Ind=NA,ML=NA,CAL=NA,mulen=NA,wts=c(1,1,0.5,0.1,1),
Jump_fac=1,nits=4000, burnin=500,thin=10,ESS=300,MLsd=0.1,
ploty=T,nplot=6,SRAdir=NA){
# Pure-R stochastic SRA conditioning. A Metropolis-Hastings MCMC estimates
# log(R0), logistic selectivity (inflection / slope) and recruitment
# deviations for all simulations simultaneously, fitting to catch-at-age
# (CAA), an abundance index (Ind), mean lengths (ML) and catch-at-length
# (CAL), weighted by wts = c(CAA, recdevs, Ind, ML, CAL). The posterior
# draws are written to OM@cpars and the updated OM is returned.
# --- Process inputs: index, mean length, Umax, observation error -----------
OM <- updateMSE(OM) # Check that all required slots in OM object contain values
nyears<-length(Chist)
if(class(Chist)=="matrix")nyears<-nrow(Chist)
maxage<-OM@maxage
if(length(Ind)==1){
Ind<-rep(NA,nyears)
}else{
if(sum(is.na(Ind))<nyears)Ind<-Ind/mean(Ind,na.rm=T) # normalize Ind to mean 1
}
if(length(ML)==1)ML<-rep(NA,nyears)
Umax<-1-exp(-OM@maxF) # get SRA umax from OM
Imiss<-is.na(Ind) # which SSB index observations are missing?
proyears<-OM@proyears
nsim<-OM@nsim
Cobs<-runif(nsim,OM@Cobs[1],OM@Cobs[2]) # sample observation error
Iobs=runif(nsim,OM@Iobs[1],OM@Iobs[2]) # use the OM obs error for index
if (OM@nyears != nyears) {
message("OM@nyears being updated to length Chist: ", nyears)
OM@nyears <- nyears
}
# Fill missing catch observations by linear interpolation
if(sum(is.na(Chist))>0){
message("One or more of the historical annual catch observations is missing. Linear interpolation has been used to fill these data")
Chistold<-Chist
# NOTE(review): approx() defaults to n = 50 output points when xout is not
# supplied, so the interpolated series length may not match nyears unless
# nyears == 50 - confirm, consider approx(Chist, xout = 1:nyears)$y.
Chist<-approx(Chist)$y
cond<-!is.na(Chistold)
Chist[(1:nyears)[cond]]<-Chistold[(1:nyears)[cond]]
print(data.frame("Catches entered" = Chistold, "Catches interpolated"=Chist))
}
if (dim(CAA)[1] != nyears) stop("Number of CAA rows (", dim(CAA)[1], ") does not equal nyears (", nyears, "). NAs are acceptable")
if (dim(CAA)[2] != OM@maxage) {
# Pad the catch-at-age matrix with zero columns up to OM@maxage
message("Number of CAA columns (", dim(CAA)[2], ") does not equal OM@maxage (", OM@maxage, ")")
message("Assuming no CAA for ages greater than ", dim(CAA)[2], ' and filling with 0s')
addages <- OM@maxage-dim(CAA)[2]
CAA2 <- matrix(0, nrow=nrow(CAA), ncol=addages)
CAA <- cbind(CAA, CAA2)
}
# --- Catch-at-length data: validate and identify years with observations ---
if(length(as.vector(CAL))==1){
CALswitch=F # don't do CAL calcs
CALLH<-0 # likelihood contribution is nil
}else{
if (dim(CAL)[1] != nyears) stop("Number of CAL rows (", dim(CAL)[1], ") does not equal nyears (", nyears, "). NAs are acceptable")
if(is.na(mulen[1])) stop("You must specify the argument mulen, which is the mean length of each length bin (columns) of the CAL data")
if (dim(CAL)[2] != length(mulen)) {
stop("The argument mulen (the mean length of each length bin) should be of the same length as the number of columns of the CAL data")
}
CALyrs<-(1:nrow(CAL))[apply(CAL,1,function(x)sum(is.na(x)))<ncol(CAL)]
CALswitch=T
}
nlen<-length(mulen)
# Enforce a minimum burn-in of 5% of the chain length
if (burnin < 0.05*nits) burnin <- 0.05 * nits
if("nsim"%in%slotNames(OM))nsim<-OM@nsim
if("proyears"%in%slotNames(OM))proyears<-OM@proyears
OM@nsim<-nsim
OM@proyears<-proyears
# --- Sample biological and fleet parameters from the OM --------------------
# Sample custom parameters
SampCpars <- list() # empty list
# custom parameters exist - sample and write to list
if(length(OM@cpars)>0){
# ncparsim<-cparscheck(OM@cpars) # check each list object has the same length and if not stop and error report
SampCpars <- SampleCpars(OM@cpars, nsim)
}
# Sample Stock Parameters
options(warn=-1)
StockPars <- SampleStockPars(OM, nsim, nyears, proyears, SampCpars, msg=FALSE)
options(warn=1)
# Assign Stock pars to function environment (M, hs, Linf, K, t0, procsd, ...)
for (X in 1:length(StockPars)) assign(names(StockPars)[X], StockPars[[X]])
agearr<-array(rep(1:maxage,each=nsim),c(nsim,maxage))
Wt_age <- Wt_age[,,nyears] # no time-varying growth
Mat_age<- Mat_age[,,nyears]
Len_age<-Len_age[,,nyears]
# iALK script =================================================
# Build an inverse age-length key per simulation: normal length-at-age with
# CV sampled from OM@LenCV, normalized to sum to 1 over length bins per age
if(CALswitch){
lvar<-runif(nsim,OM@LenCV[1],OM@LenCV[2])
iALK<-array(NA,c(nsim,maxage,nlen))
ind<-as.matrix(expand.grid(1:nsim,1:maxage,1:nlen))
Lind<-ind[,c(1,2)]
iALK[ind]<-dnorm(mulen[ind[,3]],Len_age[Lind],lvar[ind[,1]]*Len_age[Lind])
sums<-apply(iALK,1:2,sum)
sind<-ind[,1:2]
iALK<-iALK/sums[sind]
#contour(x=1:maxage,y=1:nlen,iALK[3,,],nlevels=10)
}
# Sample Fleet Parameters
options(warn=-1)
FleetPars <- SampleFleetPars(SubOM(OM, "Fleet"), Stock=StockPars, nsim,
nyears, proyears, cpars=SampCpars)
options(warn=1)
# Assign Fleet pars to function environment
for (X in 1:length(FleetPars)) assign(names(FleetPars)[X], FleetPars[[X]])
# Sampled arrays: lognormal catch observation error around Chist
Chist_a<-array(trlnorm(nyears*nsim,1,Cobs)*rep(Chist,each=nsim),c(nsim,nyears)) # Historical catch
# --- Set up MCMC: bounds, initial values, jumping distribution -------------
# set up mcmc
lnR0<-lninfl<-lnslp<-array(NA,c(nsim,nits))
lnRD<-array(0,c(nsim,nyears+maxage,nits))
LHD<-array(NA,c(nsim,nits))
# if(sfIsRunning())sfExport(list=c("Chist_a"))
# Bracket log(R0) by deterministic SRA fits at high (M*4) and low (M/10) F
if(sfIsRunning()){
R0LB<-sfSapply(1:nsim,LSRA,FF=M*4,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
R0UB<-sfSapply(1:nsim,LSRA,FF=M/10,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
}else{
R0LB<-sapply(1:nsim,LSRA,FF=M*4,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
R0UB<-sapply(1:nsim,LSRA,FF=M/10,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
}
R0b=cbind(R0LB-1,R0UB+1)
inflb<-log(c(0.5,maxage*0.5))
slpb<-log(exp(inflb)*c(0.1,2))#c(-3,3)
RDb<-c(-2,2)
# initial guesses
lnR0[,1]<-R0UB#log(apply(Chist_a,1,mean))
lninfl[,1]<-log(maxage/4)
lnslp[,1]<-log(exp(lninfl[,1])*0.2)
lnRD[,,1]<-0
# parameter vector: all simulations concatenated [lnR0 | lninfl | lnslp | lnRD]
pars<-c(lnR0[,1],lninfl[,1],lnslp[,1],lnRD[,,1])
npars<-length(pars)
# parameter store
LHstr<-array(NA,c(nsim,nits))
parstr<-array(NA,c(npars,nits))
# parameter indexes (positions within the pars vector)
R0ind<-1:nsim
inflind<-(1*nsim)+(1:nsim)
slpind<-(2*nsim)+(1:nsim)
RDind<-(3*nsim+1):length(pars)
# Parameter jumping (proposal SDs, scaled by Jump_fac)
JumpCV<-rep(0.05,npars) # R0
JumpCV[inflind]<-0.05
JumpCV[slpind]<-0.05
JumpCV[RDind]<-0.1*mean(procsd) # a function of sigmaR to provide reasonable acceptance rate
JumpCV<-JumpCV*Jump_fac
# parameter censorship (hard lower/upper bounds)
parLB<-parUB<-rep(NA,length(pars))
parLB[R0ind]<-R0b[,1]
parLB[inflind]<-inflb[1]
parLB[slpind]<-slpb[1]
parLB[RDind]<-RDb[1]
parUB[R0ind]<-R0b[,2]
parUB[inflind]<-inflb[2]
parUB[slpind]<-slpb[2]
parUB[RDind]<-RDb[2]
CAAadj=sum(CAA,na.rm=T)/ESS # ESS adjustment to low sample sizes
CALadj=sum(CAL,na.rm=T)/ESS # ESS adjustment to low sample sizes
update<-(1:50)*(nits/50)
# Proposal-variance annealing: wide early jumps, standard after 200 iterations
adapt<-c(rep(5,100),rep(2.5,100),rep(1,nits-200))
CAA_pred<-array(NA,c(nsim,nyears,maxage))
if(CALswitch){
CAL_pred<-array(NA,c(nsim,nyears,nlen))
CALtemp<-array(NA,c(nsim,maxage,nlen))
CAAind<-cbind(ind[,1],rep(1,nsim*maxage),ind[,2]) # sim, year, age
}
PredF<-MLpred<-SSB<-array(NA,c(nsim,nyears))
# --- MCMC loop --------------------------------------------------------------
for(i in 1:nits){
if(i %in% update){
cat(".")
flush.console()
}
#i<-i+1# debugging
# Propose new parameters (truncated to bounds); iteration 1 keeps initials
Reject<-rep(FALSE,nsim)
nupars<-rnorm(npars,pars,JumpCV*adapt[i])
nupars[nupars<parLB]<-parLB[nupars<parLB]
nupars[nupars>parUB]<-parUB[nupars>parUB]
if(i==1)nupars=pars
R0<-exp(nupars[R0ind])
infl<-exp(nupars[inflind])
#infl<-(0.05+(infl/(1+infl))*0.45)*maxage
slp<-exp(nupars[slpind])
#slp<-(0.02+(slp/(1+slp))*0.98)*infl
RD<-exp(array(nupars[RDind],c(nsim,nyears+maxage)))
RD<-RD/apply(RD,1,mean)
sel<-1/(1+exp((infl-(agearr))/slp))
# calcs (getting pen as a zero or a 1)
# Unfished equilibrium numbers-at-age and spawning biomass per recruit
N<-R0*exp(-M*(agearr-1))
SSB0<-apply(N*Mat_age*Wt_age,1,sum)
SSBpR<-SSB0/R0
CAA_pred[]<-NA
if(CALswitch)CAL_pred[]<-0
PredF[]<-NA
MLpred[]<-NA
SSB[]<-NA
# Project the age-structured population forward, removing observed catch
for(y in 1:nyears){ # M - F - aging / recruitment
if(y==1)N<-N*RD[,maxage:1]
SSB[,y]<-apply(N*Mat_age*Wt_age,1,sum)
PredN<-N*exp(-M/2)
PredVN<-PredN*sel
CAA_pred[,y,]<-PredVN/apply(PredVN,1,sum)
if(CALswitch){
if(y%in%CALyrs){
CAAind[,2]<-y
CALtemp[ind]<-iALK[ind]*CAA_pred[CAAind]
CAL_pred[,y,]<-CAL_pred[,y,]+apply(CALtemp,c(1,3),sum)
}
}
MLpred[,y]<-apply(CAA_pred[,y,]*Len_age,1,sum)/apply(CAA_pred[,y,],1,sum)
PredVW<-PredVN*Wt_age # Predicted vulnerable weight
Predfrac<-PredVW/apply(PredVW,1,sum) # Catch weight distribution over ages
Cat<-(Chist_a[,y]*Predfrac)/Wt_age # Guess at catch numbers by age
predU<-Cat/PredN # Which means this harvest rate
predU[!is.finite(predU)] <- Inf
cond<-predU>Umax # Check max U
Reject[apply(cond,1,sum)>0]<-TRUE # Reject sims where U > Umax for any age class
Cat[cond]<-Cat[cond]/(predU[cond]/Umax) # Set catch to Umax
PredF[,y]<--log(1-apply(Cat/PredN,1,max)) # apical F
N<-N*exp(-M)-Cat #PredF[,y]*sel)
N[,2:maxage]<-N[,1:(maxage-1)] # aging
# Beverton-Holt recruitment with deviation
N[,1]<-RD[,maxage+y]*(0.8*R0*hs*SSB[,y])/(0.2*SSBpR*R0*(1-hs)+(hs-0.2)*SSB[,y])
N[N<0] <- tiny
}
# --- Likelihoods: index, mean length, CAA, CAL, recdev prior ---------------
Ipred<-SSB
Ipred[matrix(rep(Imiss,each=nsim),nrow=nsim)]<-NA
Ipred<-Ipred/apply(Ipred,1,mean,na.rm=T)
Ires<-Ipred/matrix(rep(Ind,each=nsim),nrow=nsim)
MLres<-MLpred/matrix(rep(ML,each=nsim),nrow=nsim)
# Clamp residuals / proportions to avoid log(0) and overflow
Ires[Ires<(1E-10)]<-(1E-10)
Ires[Ires>1E10]<-1E10
MLres[MLres<(1E-10)]<-(1E-10)
MLres[MLres>1E10]<-1E10
CAA_pred[CAA_pred<1E-15]<-1E-15
if(CALswitch){
CAL_pred<-CAL_pred/array(apply(CAL_pred,1:2,sum),dim(CAL_pred))
CAL_pred[CAL_pred<1E-15]<-1E-15
}
CAALH<-apply(log(CAA_pred)*
array(rep(CAA,each=nsim)/CAAadj,c(nsim,nyears,maxage)),
1,sum,na.rm=T)
if(CALswitch){
CALLH<-apply(log(CAL_pred[,CALyrs,])*
array(rep(CAL[CALyrs,],each=nsim)/CALadj,c(nsim,length(CALyrs),nlen)),
1,sum,na.rm=T)
}
RDLH<-apply(matrix(dnorm(nupars[RDind],-(procsd^2)/2,procsd,log=T),nrow=nsim),1,sum)
ILH<-apply(dnorm(log(Ires),-(Iobs^2)/2,Iobs,log=T),1,sum,na.rm=T)
MLLH<-apply(dnorm(log(MLres),-(MLsd^2)/2,MLsd,log=T),1,sum,na.rm=T)
# Weighted total log-likelihood: wts = c(CAA, recdevs, Ind, ML, CAL)
LH<-wts[1]*CAALH+wts[2]*RDLH+wts[3]*ILH+wts[4]*MLLH+wts[5]*CALLH
# Reject / accept (cond): Metropolis-Hastings rule per simulation
if(i > 1){
Accept<-runif(nsim)<exp(LH-LHstr[,i-1])
Accept[Reject]<-FALSE
LHstr[,i]<-LHstr[,i-1]
LHstr[Accept,i]<-LH[Accept]
Aind<-rep(Accept,npars/nsim) # The correct index in the pars vector for the accepted simulations
parstr[,i]<-parstr[,i-1]
parstr[Aind,i]<-nupars[Aind]
pars<-parstr[,i]
# print(rbind(Reject,Accept))
}else{
parstr[,i]<-pars
LHstr[,i]<-LH
}
} # End of MCMC
# --- Diagnostic plots: chain traces (thinned) and posterior densities ------
if(!is.na(SRAdir))jpeg(paste0(SRAdir,"/SRA_convergence.jpg"),width=7,height=9,units='in',res=400)
if(ploty){
col<-rep(c("blue","red","green","orange","grey","brown","pink","yellow","dark red","dark blue","dark green"),100)
par(mfcol=c(5,2),mai=c(0.7,0.6,0.05,0.1))
pind<-(1:(nits/thin))*thin
matplot(pind,t(parstr[1:nplot,pind]),type='l',ylab="log R0",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[nsim+(1:nplot),pind]),type='l',ylab="log infl (sel)",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[(nsim*2)+(1:nplot),pind]),type='l',ylab="log slp (sel)",xlab="Iteration")
abline(v=burnin,lty=2)
# NOTE(review): the nsim*30 / nsim*40 offsets index recruitment deviations
# and assume nyears + maxage > 40 - confirm for short time series.
matplot(pind,t(parstr[(nsim*30)+(1:nplot),pind]),type='l',ylab="recdev1",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[(nsim*40)+(1:nplot),pind]),type='l',ylab="recdev2",xlab="Iteration")
abline(v=burnin,lty=2)
burn<-burnin:nits
plot(density(parstr[1:nsim,burn],adj=0.7),xlab="log(R0)",main="")
plot(density(parstr[nsim+(1:nsim),burn],adj=0.7),xlab="inflection selectivity",main="")
plot(density(parstr[(nsim*2)+(1:nsim),burn],adj=0.7),xlab="slope selectivity",main="")
plot(density(parstr[(nsim*30)+(1:nsim),burn],adj=0.7),xlab="recdev1",main="")
plot(density(parstr[(nsim*40)+(1:nsim),burn],adj=0.7),xlab="recdev2",main="")
}
if(!is.na(SRAdir))dev.off()
# --- Diagnostic plots: predicted SSB, depletion, F, selectivity, recdevs ---
if(!is.na(SRAdir))jpeg(paste0(SRAdir,"/SRA predictions.jpg"),width=7,height=11,units='in',res=400)
if(ploty){
par(mfrow=c(6,2),mai=c(0.65,0.6,0.02,0.1))
qq<-apply(SSB,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
matplot(t(SSB[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="SSB")
xs<-dim(SSB)[2]
plot(qq[3,],ylim=ylim,type='l',xlab="Year",ylab="SSB")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
D<-SSB/SSB0
qq<-apply(D,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
matplot(t(D[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="Depletion")
plot(qq[3,],ylim=ylim,type='l',xlab="Year",ylab="Depletion")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
qq<-apply(PredF,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
matplot(t(PredF[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="Fish. Mort.")
plot(qq[3,],ylim=ylim,type='l',xlab="Year",ylab="Fish. Mort.")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
nyears<-dim(CAA)[1]
nages<-dim(CAA)[2]
qq<-apply(sel,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
xs<-maxage
matplot(t(sel[1:nplot,]),ylim=ylim,type="l",xlab="Age",ylab="Selectivity")
plot(qq[3,],ylim=ylim,type='l',xlab="Age",ylab="Selectivity")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
RDx<-(-maxage+1):nyears
qq<-apply(RD,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
xs<-dim(RD)[2]
matplot(RDx,t(RD[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="Rec. Dev.")
plot(RDx,qq[3,],ylim=ylim,type='l',xlab="Year",ylab="Rec. Dev.")
polygon(c(RDx,RDx[xs:1]),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(RDx,RDx[xs:1]),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(RDx,qq[3,],lwd=1,col="white")
# Bubble plots of observed vs predicted (sim 1) catch-at-age composition
plot(c(1,nyears),c(1,nages),col='white',xlab="Year",ylab="Age")
legend("top",legend="Observed composition data",bty='n')
points(rep(1:nyears,nages),rep(1:nages,each=nyears),cex=CAA^0.5/max(CAA^0.5,na.rm=T)*1.5,pch=19,col=makeTransparent("dark grey",60))
CAA_pred1<-CAA_pred
CAA_pred1[CAA_pred1<0.002]<-NA
plot(c(1,dim(CAA)[1]),c(1,dim(CAA)[2]),col='white',xlab="Year",ylab="Age")
legend("top",legend="Predicted composition data (1 sim)",bty='n')
points(rep(1:nyears,nages),rep(1:nages,each=nyears),cex=CAA_pred1[1,,]^0.5/max(CAA_pred1[1,,]^0.5,na.rm=T)*1.5,pch=19,col=makeTransparent("dark grey",60))
}
if(!is.na(SRAdir))dev.off()
# --- Summarize posterior and write results into the OM ---------------------
dep<-SSB[,nyears]/SSB0
procsd<-apply(RD,1,sd,na.rm=T)
procmu <- -0.5 * (procsd)^2 # adjusted log normal mean
OM@D<-quantile(dep,c(0.05,0.95))
OM@Perr<-quantile(procsd,c(0.025,0.975))
# Lag-1 autocorrelation of recruitment deviations per simulation
getAC<-function(recdev)acf(recdev,plot=F)$acf[2,1,1]
AC<-apply(RD,1,getAC)
OM@AC<-quantile(AC,c(0.05,0.95))
# Convert logistic selectivity (age) to length at 5% / 95% selection via VB growth
# NOTE(review): unlike StochasticSRAcpp, A5 is not floored at 0 here, so
# negative ages (hence lengths below length at age 0) are possible - confirm.
A5<--(slp*log(1/0.05-1)-infl)
A95<--(slp*log(1/0.95-1)-infl)
L5<-Linf*(1-exp(-K*(A5-t0)))
L95<-Linf*(1-exp(-K*(A95-t0)))
OM@L5<-quantile(L5,c(0.05,0.95))
OM@LFS<-quantile(L95,c(0.05,0.95))
OM@nyears<-nyears
OM@EffYears<-1:OM@nyears
OM@EffLower<-apply(PredF,2,quantile,p=0.05)
OM@EffUpper<-apply(PredF,2,quantile,p=0.95)
OM@nyears<-nyears
# Recruitment deviations: historical (from SRA) + AR(1) projections
Perr<-array(NA,c(nsim,maxage+nyears+proyears-1))
Perr[,1:(nyears+maxage-1)]<-log(RD[,2:(maxage+nyears)])
Perr[,(nyears+maxage):(nyears+maxage+proyears-1)]<-matrix(rnorm(nsim*(proyears),rep(procmu,proyears),rep(procsd,proyears)),nrow=nsim)
for (y in (maxage+nyears):(nyears + proyears+maxage-1)) Perr[, y] <- AC * Perr[, y - 1] + Perr[, y] * (1 - AC * AC)^0.5
Perr<-exp(Perr)
PredF<-PredF/apply(PredF,1,mean) # Find should be mean 1 so qs optimizers are standardized
Wt_age <- array(Wt_age, dim=c(dim=c(nsim, maxage, nyears+proyears)))
Len_age <- array(Len_age, dim=c(nsim, maxage, nyears+proyears))
Marray <- matrix(M, nrow=nsim, ncol=proyears+nyears)
OM@cpars<-list(D=dep,M=M,procsd=procsd,AC=AC,hs=hs,Linf=Linf,
Wt_age=Wt_age, Len_age=Len_age, Marray=Marray,
K=K,t0=t0,L50=L50,
L5=L5,LFS=L95,Find=PredF,
V=array(sel,c(nsim,maxage,nyears)),Perr=Perr,R0=R0,
Iobs=apply(Ires,1,sd,na.rm=T),
SSB=SSB,SSB0=SSB0,RD=RD) # not valid for runMSE code but required
OM
}
#' Generic comparison plot for simulation testing of Stochastic SRA method
#'
#' @description Plots simulation variables versus estimation variables for Stochastic SRA methods of conditioning operating models.
#' @param simy The simulated time series
#' @param samy The matrix of estimated time series from of StochasticSRA() function.
#' @param xlab The x axis label for the plot
#' @param ylab The y axis label for the plot
#' @param maxplot The total number of individual simulations to be plotted in the first plot
#' @param type Should a line 'l' or points 'p' be plotted?
#' @return A plot
#' @author T. Carruthers (Canadian DFO grant)
#' @export compplot
#' @examples
#' nyears<-100
#' nsims<-200
#' simy<-sin(seq(0,2,length.out=nyears))
#' samy<-array(rep(simy,each=nsims)*rnorm(nsims,1,0.2)*rnorm(nsims*nyears,1,0.1),c(nsims,nyears))
#' par(mfrow=c(1,2))
#' compplot(simy,samy,xlab="Year",ylab="Some time varying parameter")
compplot<-function(simy,samy,xlab="",ylab="",maxplot=10,type="l"){
  # Draws two side-by-side comparison panels on the current device:
  # (1) the simulated series with up to `maxplot` individual estimated
  # trajectories overlaid, and (2) the simulated series over 90% (light
  # grey) and 50% (dark grey) quantile envelopes of the estimates.
  # Recycled palette so any number of overlaid simulations gets a colour
  palette_cols <- rep(c("blue", "red", "green", "orange", "grey", "brown",
                        "pink", "yellow", "dark red", "dark blue",
                        "dark green"), 100)
  n_draws <- dim(samy)[1]  # number of estimated trajectories (rows)
  n_steps <- dim(samy)[2]  # length of each trajectory (columns)
  # 5/25/50/75/95% quantiles of the estimates at each step
  qtl  <- apply(samy, 2, quantile, p = c(0.05, 0.25, 0.5, 0.75, 0.95))
  yrng <- c(0, max(simy, qtl))
  # Panel 1: truth plus individual estimated trajectories
  plot(simy, ylim = yrng, type = type, xlab = xlab, ylab = ylab)
  for (i in 1:min(n_draws, maxplot)) lines(samy[i, ], col = palette_cols[i])
  if (type == "l") lines(simy, lwd = 3)
  if (type == "p") points(simy, pch = 19)
  # Panel 2: truth over quantile envelopes with a white median line
  plot(simy, ylim = yrng, type = "l", xlab = xlab, ylab = ylab)
  polygon(c(1:n_steps, n_steps:1), c(qtl[1, ], qtl[5, n_steps:1]),
          border = NA, col = "light grey")
  polygon(c(1:n_steps, n_steps:1), c(qtl[2, ], qtl[4, n_steps:1]),
          border = NA, col = "dark grey")
  lines(qtl[3, ], lwd = 1, col = "white")
  if (type == "l") lines(simy, lwd = 3)
  if (type == "p") points(simy, pch = 19)
}
#' Plot simulation test of Stochastic SRA method
#'
#' @description Plots simulation variables versus estimation variables for Stochastic SRA methods of conditioning operating models.
#' @param sim The output list object of SRAsim() function.
#' @param OM The output object of StochasticSRA() function.
#' @param outfile The name of the figure (something.jpg) you wish to make using SRAcomp
#' @param maxplot The maximum number of simulations to plot
#' @author T. Carruthers (Canadian DFO grant)
#' @export SRAcomp
#' @examples
#' \dontrun{
#' sim<-SRAsim(testOM,qmult=1,patchy=0.8)
#' CAA<-sim$CAA
#' Chist<-sim$Chist
#' testOM<-StochasticSRA(testOM,CAA,Chist,nsim=30,nits=500)
#' SRAcomp(sim,testOM)
#' }
SRAcomp<-function(sim,OM,outfile=NA,maxplot=10){
  # Compare simulated (truth) quantities from SRAsim() against the SRA
  # estimates stored in OM@cpars: SSB, depletion, (optionally) recruitment
  # deviations, and selectivity-at-age. One compplot() panel pair each.
  #
  # sim:     output list of SRAsim() (uses elements SSB, SSB0, Recdevs, sel)
  # OM:      operating model returned by StochasticSRA(); estimates in OM@cpars
  # outfile: optional jpeg filename; if supplied the figure is written to disk
  # maxplot: maximum number of individual simulations drawn per panel
  #
  # Returns nothing; called for its plotting side effects.
  sam<-OM@cpars
  if(!is.na(outfile))jpeg(outfile,width=7,height=9,units='in',res=400)
  # Removed unused locals (nsim, nyears, maxage) - panel layout depends only
  # on whether recruitment deviations were estimated.
  PEest<-"Perr"%in%names(sam)
  if(PEest)par(mfrow=c(4,2),mai=c(0.5,0.7,0.05,0.05))
  if(!PEest)par(mfrow=c(3,2),mai=c(0.5,0.7,0.05,0.05))
  # SSB
  compplot(sim$SSB,sam$SSB,xlab="Year",ylab="SSB",maxplot=maxplot)
  # Depletion
  compplot(sim$SSB/sim$SSB0,sam$SSB/sam$SSB0,xlab="Year",ylab="Depletion",maxplot=maxplot)
  # Recdevs (only when the SRA estimated them)
  if(PEest)compplot(sim$Recdevs,sam$RD,xlab="Year",ylab="log recruitment",type="l",maxplot=maxplot)
  # Selectivity (first historical year)
  compplot(sim$sel,sam$V[,,1],xlab="Age",ylab="Selectivity",type="l",maxplot=maxplot)
  legend('bottomright',legend=c("Simulated","Estimated 90% PI","Estimated 50% PI"),bty='n',text.col=c("black","grey","dark grey"),text.font=rep(2,3))
  if(!is.na(outfile))dev.off()
}
#' Estimates R0 using SRA to match current F estimates and avoid penalities for low stock sizes
#'
#' @param x a position in the various arrays and vectors that corresponds with a simulation (for use with sapply)
#' @param FF a vector of recent fishign mortality rates (apical Fs)
#' @param Chist_arr a vector of historical catch observations `[nyears]`
#' @param M a vector of natural mortality rates `[nsim]`
#' @param Mat_age a matrix of maturity at age `[nsim x nage]`
#' @param Wt_age a matrix of weight at age `[nsim x nage]`
#' @param sel a matrix of selectivity at age `[nsim x nage]`
#' @param Recdevs a matrix of recruitment deviations `[nsim x nyears]`
#' @param h a vector of steepness values of the Bev-Holt Stock-Recruitment relationship
#' @return all package data objects are placed in the global namespace \code{dir}
#' @export LSRA
#' @author T. Carruthers
LSRA<-function(x,FF,Chist_arr,M,Mat_age,Wt_age,sel,Recdevs,h){
# Estimate log(R0) for simulation x by one-dimensional minimization of the
# SRA objective (LSRA_opt, mode 1) over a bracketing interval.
n_age<-ncol(Mat_age)
# Bracket unfished SSB between 5% and 100x of the total observed catch
ssb0_bounds<-sum(Chist_arr[x,])*c(0.05,100)
# Unfished spawning biomass per recruit for this simulation
surv<-exp(-M[x]*(0:(n_age-1)))
spr0<-sum(surv*Mat_age[x,]*Wt_age[x,])
r0_bounds<-ssb0_bounds/spr0
fit<-optimize(LSRA_opt,interval=log(r0_bounds),
FF_a=FF[x],
Chist=Chist_arr[x,],
M_a=M[x],
Mat_age_a=Mat_age[x,],
Wt_age_a=Wt_age[x,],
sel_a=sel[x,],
Recdevs_a=Recdevs[x,],
h_a=h[x])
# Return the minimizing log(R0)
fit$minimum
}
#' Alternative version of LSRA that's a wrapper for LSRA_opt to return the right type of output (mode) using sapply
#'
#' @param x a position in the various arrays and vectors that corresponds with a simulation (for use with sapply)
#' @param lnR0s a vector nsim long that are estimated R0 values
#' @param FF a vector of recent fishing mortality rates (apical Fs)
#' @param Chist a vector of historical catch observations `[nyears]`
#' @param M a vector of natural mortality rates `[nsim]`
#' @param Mat_age a matrix of maturity at age `[nsim x nage]`
#' @param Wt_age a matrix of weight at age `[nsim x nage]`
#' @param sel a matrix of selectivity at age `[nsim x nage]`
#' @param Recdevs a matrix of recruitment deviations `[nsim x nyears]`
#' @param h a vector of steepness values of the Bev-Holt Stock-Recruitment relationship
#' @param mode optimization or plotting
#' @return all package data objects are placed in the global namespace \code{dir}
#' @export LSRA2
#' @author T. Carruthers
LSRA2<-function(x,lnR0s,FF,Chist,M,Mat_age,Wt_age,sel,Recdevs,h,mode=2){
# Evaluate LSRA_opt at an already-estimated log(R0) for simulation x,
# returning whichever output 'mode' selects (see LSRA_opt).
LSRA_opt(lnR0s[x],
FF_a=FF[x],
Chist=Chist[x,],
M_a=M[x],
Mat_age_a=Mat_age[x,],
Wt_age_a=Wt_age[x,],
sel_a=sel[x,],
Recdevs_a=Recdevs[x,],
h_a=h[x],
mode=mode)
}
#' Internal estimation function for LSRA and LSRA2 functions
#'
#' @param param a numeric value representing log(R0)
#' @param FF_a numeric value, recent fishing mortality rate (apical F)
#' @param Chist a vector of historical catch observations `[nyears]`
#' @param M_a numeric value, natural mortality rate
#' @param Mat_age_a a vector of maturity at age `[nage]`
#' @param Wt_age_a a vector of weight at age `[nage]`
#' @param sel_a a vector of selectivity at age `[nage]`
#' @param Recdevs_a a vector of recruitment deviations `[nyears]`
#' @param h_a a numeric value of steepness values of the Bev-Holt Stock-Recruitment relationship
#' @param Umax maximum harvest rate per year
#' @param mode 1-5 see below
#' @return depends on mode: 1: objective function value, 2: trajectory of Fs, 3: SSB depletion, 4: log(R0), 5: SSB trajectory; any other mode value produces diagnostic plots
#' @export LSRA_opt
#' @author T. Carruthers
LSRA_opt<-function(param,FF_a,Chist,M_a,Mat_age_a,Wt_age_a,sel_a,Recdevs_a,h_a,Umax=0.5,mode=1){
# Age-structured stock reduction: project the population forward under the
# observed catch history given log(R0) = param, then return the quantity
# selected by 'mode' (see roxygen above).
nyears<-length(Chist)
maxage<-length(Mat_age_a)
R0<-exp(param)
# Unfished equilibrium numbers-at-age
N<-R0*exp(-M_a*(0:(maxage-1)))
Nstr<-array(NA,c(nyears,maxage))  # stored numbers-at-age (filled but not returned)
SSB0<-sum(N*Mat_age_a*Wt_age_a)
SSBpR<-SSB0/R0  # unfished spawners per recruit
pen<-0  # penalty accumulated whenever implied harvest rate exceeds Umax
PredF<-SSB<-rep(NA,nyears)
for(y in seq_len(nyears)){
SSB[y]<-sum(N*Mat_age_a*Wt_age_a)
PredN<-N*exp(-M_a/2)              # mid-year abundance
PredVN<-PredN*sel_a               # vulnerable numbers
PredVW<-PredVN*Wt_age_a           # vulnerable weight
Predfrac<-PredVW/sum(PredVW)      # catch allocation across ages
Cat<-Chist[y]*Predfrac
predU<-Cat/(PredN*Wt_age_a)       # implied harvest rate by age
cond<-predU>Umax
if(any(cond)){
# Penalize and cap any age class that would exceed the maximum harvest rate
pen<-pen+sum(abs(predU[cond]-Umax)^2)
Cat[cond]<-Cat[cond]/(predU[cond]/Umax)
}
PredF[y]<--log(1-max(Cat/(N*Wt_age_a)))  # apical fishing mortality rate
N<-N*exp(-M_a-PredF[y]*sel_a)
N[2:maxage]<-N[1:(maxage-1)]      # ageing (no plus-group accumulation)
# Beverton-Holt recruitment with annual deviation
N[1]<-Recdevs_a[y]*(0.8*R0*h_a*SSB[y])/(0.2*SSBpR*R0*(1-h_a)+(h_a-0.2)*SSB[y])
Nstr[y,]<-N
}
# Mean F over a recent window of years, compared with the observed F level
mupredF<-mean(PredF[(nyears-15):(nyears-5)])
if(mode==1){
pen+(log(mupredF)-log(FF_a))^2   # objective: squared log-F residual + penalty
}else if(mode==2){
PredF                            # apical F trajectory
}else if(mode==3){
SSB/SSB0                         # depletion trajectory
}else if(mode==4){
param                            # log(R0)
}else if(mode==5){
SSB                              # spawning biomass trajectory
}else{
# Any other mode: diagnostic plots of F and catch
par(mfrow=c(2,1))
plot(PredF)
abline(h=FF_a,col="red")
abline(h=mupredF,col="blue")
plot(Chist)
}
}
| /R/StochasticSRA.R | no_license | DLMtool/DLMtool | R | false | false | 48,477 | r | #' Simulates catch at age and catch history data for testing SRA methods
#'
#' @description Catch at age and catch simulator.
#' @param OM An operating model object with M, growth, stock-recruitment and maturity parameters specified.
#' @param qmult Fraction of natural mortality rate that is mean fishing mortality (Fishing catchability multiplier)
#' @param CAApatchy The fraction of years that have catch at age data
#' @param Cpatchy The fraction of years that have catch data
#' @param Ipatchy The fraction of years that have index data
#' @param MLpatchy The fraction of years that have mean length data
#' @param nCAA The number of independent annual catch at age observations (same among all years)
#' @param nL The number of independent annual catch at length observations (same among all years) for calculating mean length
#' @param sigmaE Level of simulated interannual variability in effort (F) expressed as a lognormal SD
#' @param sigmaI Observation error in relative abundance indices expressed as a lognormal SD
#' @return A list: Chist = historical catch series,Recdevs = historical recruitment deviations (mean = 1), CAA = catch at age matrix, N = numbers at age matrix, SSB = annual spawning biomass, FM = Fishing mortality rate at age matrix, M = natural mortality rate \code{classy}
#' @author T. Carruthers (Canadian DFO grant)
#' @export SRAsim
#' @examples
#' out<-SRAsim(testOM)
SRAsim<-function(OM,qmult=0.5,CAApatchy=0.4,Cpatchy=1,Ipatchy=0.4,MLpatchy=0.4,nCAA=100,nL=200,sigmaE=0.25,sigmaI=0.1){
# Simulate catch-at-age, catch, index and mean-length data for testing SRA
# methods. Bugfixes relative to the previous version:
#   (1) CAA row thinning now uses CAApatchy (it previously used Cpatchy,
#       leaving the CAApatchy argument entirely unused);
#   (2) Recdevs was one element short (nyears-1+maxage draws, but index
#       maxage+nyears is used), leaving the final year's recruitment NA;
#   (3) the documented Cpatchy thinning of the catch series is implemented
#       (default Cpatchy = 1 leaves the series complete, as before).
# --- biology from the operating model (means of stochastic slots) ---
maxage<-OM@maxage
nyears<-OM@nyears
M<-mean(OM@M)       # Natural mortality rate
h<-mean(OM@h)       # Steepness
Linf<-mean(OM@Linf)
K<-mean(OM@K)       # Maximum growth rate
t0<-mean(OM@t0)     # Theoretical length at age zero
lenM <- mean(OM@L50)
L50_90<-mean(OM@L50_95)
len95 <- lenM+L50_90
# Calculate ageM/age95 from L50/L95 and growth parameters (non-time-varying)
ageM <- -((log(1-lenM/Linf))/K) + t0
age95 <- -((log(1-len95/Linf))/K) + t0
Mat_age <- 1/(1 + exp(-log(19) * (((1:maxage) - ageM)/(age95 - ageM))))
Len_age<-Linf*(1-exp(-K*((1:maxage)-t0)))
Wt_age<-OM@a*Len_age^OM@b
M_age<-rep(M,maxage)
sel<-Mat_age  # selectivity assumed to follow maturity-at-age
# --- effort/F history: sinusoidal trend with lognormal interannual noise ---
eff<-(sin((nyears:1)/18)+1)*exp(rnorm(nyears,1,sigmaE))
apFM<-eff/mean(eff)*qmult*M  # apical F averaging qmult x M
N<-CAA<-array(NA,c(nyears,maxage))
Chist<-SSB<-ML<-rep(NA,nyears)
FM<-array(rep(apFM,maxage)*rep(sel,each=nyears),c(nyears,maxage))
Z<-array(FM+rep(M_age,each=nyears),c(nyears,maxage))
R0<-10000
sigma<-mean(OM@Perr)
# Recruitment deviations: maxage initial cohorts + one per simulated year
# (bugfix 2: was trlnorm(nyears-1+maxage,...), one draw short)
Recdevs<-trlnorm(nyears+maxage,1,sigma)
Recdevs<-Recdevs/mean(Recdevs)
N0<-R0*exp(-M*(0:(maxage-1)))
SSB0<-sum(N0*Mat_age*Wt_age)
SSBpR<-SSB0/R0  # unfished spawners per recruit
N1<-N0*Recdevs[maxage:1]  # initial numbers-at-age with cohort deviations
for(y in 1:nyears){
if(y==1){
N[1,]<-N1
}else{
N[y,]<-N[y-1,]
}
SSB[y]<-sum(N[y,]*Mat_age*Wt_age)
CAA[y,]<-N[y,]*exp(-M_age/2)*(1-exp(-FM[y,]))  # mid-year catch numbers at age
# Mean length of nL individuals resampled from the catch composition
# (note: errors if total catch numbers fall below nL - unchanged behavior)
ML[y]<-mean(sample(rep(Len_age,CAA[y,]),nL))
Chist[y]<-sum(CAA[y,]*Wt_age)
N[y,]<-N[y,]*exp(-Z[y,])
N[y,2:maxage]<-N[y,1:(maxage-1)]  # ageing
# Beverton-Holt recruitment with annual deviation
N[y,1]<-Recdevs[maxage+y]*(0.8 * R0 * h * SSB[y])/(0.2*SSBpR*R0*(1-h)+(h-0.2)*SSB[y])
}
# Observation model: multinomial age sampling, then thin years of CAA data
for(y in 1:nyears)CAA[y,]<-rmultinom(1,nCAA,CAA[y,])
# Bugfix 1: thin CAA rows using CAApatchy (previously used Cpatchy)
CAA[sample(seq_len(nyears),size=floor(nyears*(1-CAApatchy)),replace=FALSE),]<-NA
par(mfrow=c(4,2),mai=c(0.8,0.8,0.05,0.05))
plot(SSB,type="l",xlab="Year",ylim=c(0,max(SSB)))
plot(Chist,type="l",xlab="Year",ylim=c(0,max(Chist)))
plot(apFM,type="l",xlab="Year",ylab="ApicalF")
plot(sel,type="l",xlab="Age",ylab="Selectivity")
# x-axis covers the maxage initial cohorts plus the nyears in-sample years
plot((-maxage+1):nyears,Recdevs,xlab="Year")
abline(v=0.5,col='blue')  # boundary between initial cohorts and simulated years
plot(c(1,nyears),c(1,maxage),col='white',xlab="Year",ylab="Age")
legend("top",legend="Catch composition data",bty='n')
points(rep(1:nyears,maxage),rep(1:maxage,each=nyears),cex=CAA^0.5/max(CAA^0.5,na.rm=TRUE)*2.5)
# Relative abundance index proportional to SSB with lognormal observation error
Ind<-trlnorm(nyears,1,sigmaI)*(SSB/SSB[1])
Ind[sample(seq_len(nyears),size=floor(nyears*(1-Ipatchy)),replace=FALSE)]<-NA
Ind<-Ind/mean(Ind,na.rm=TRUE)
ML[sample(seq_len(nyears),size=floor(nyears*(1-MLpatchy)),replace=FALSE)]<-NA
plot(1:nyears,Ind,xlab="Year",ylim=c(0,max(Ind,na.rm=TRUE)))
plot(1:nyears,ML,xlab="Year")
# Bugfix 3: thin the catch series per Cpatchy (after plotting the full series);
# default Cpatchy = 1 removes nothing, preserving prior behavior
Chist[sample(seq_len(nyears),size=floor(nyears*(1-Cpatchy)),replace=FALSE)]<-NA
return(list(Chist=Chist,Recdevs=Recdevs,CAA=CAA,Ind=Ind,ML=ML,N=N,SSB=SSB,FM=FM,M=M,SSB0=SSB0,sel=sel))
}
# helper functions
LSRA_cppWrapper <- function(param, FF_a, Chist, M_a, Mat_age_a, Wt_age_a, sel_a,
Recdevs_a, h_a, Umax=0.5, mode=1) {
# Run the compiled SRA once, then extract the output component selected by
# 'mode' so the wrapper can be passed to optimize() and friends.
fit <- LSRA_opt_cpp(param, FF_a, Chist, M_a, Mat_age_a, Wt_age_a, sel_a,
Recdevs_a, h_a, Umax)
fit[[mode]]
}
LSRA_cpp <-function(x,FF,Chist_arr,M,Mat_age,Wt_age,sel,Recdevs,h){
# Estimate log(R0) for simulation x by one-dimensional minimization of the
# compiled SRA objective (mode 1 of LSRA_opt_cpp via LSRA_cppWrapper).
n_age<-ncol(Mat_age)
# Bracket unfished SSB between 5% and 100x of the total observed catch
ssb0_bounds<-sum(Chist_arr[x,])*c(0.05,100)
# Unfished spawning biomass per recruit for this simulation
spr0<-sum(exp(-M[x]*(0:(n_age-1)))*Mat_age[x,]*Wt_age[x,])
r0_bounds<-ssb0_bounds/spr0
fit<-optimize(LSRA_cppWrapper,interval=log(r0_bounds),
FF_a=FF[x],
Chist=Chist_arr[x,],
M_a=M[x],
Mat_age_a=Mat_age[x,],
Wt_age_a=Wt_age[x,],
sel_a=sel[x,],
Recdevs_a=Recdevs[x,],
h_a=h[x], mode=1)
# Return the minimizing log(R0)
fit$minimum
}
#' Stochastic SRA construction of operating models
#'
#' @description Specify an operating model, using catch composition data and a historical catch series.
#' Returns and operating model with depletion (D), selectivity parameters (L5, LFS) and effort trajectory (Effyears, EffLower, EffUpper) filled.
#' Modified version using cpp code.
#' @param OM An operating model object with M, growth, stock-recruitment and maturity parameters specified.
#' @param CAA A matrix nyears (rows) by nages (columns) of catch at age (age 1 to maxage in length)
#' @param Chist A vector of historical catch observations (nyears long) going back to unfished conditions
#' @param Ind A vector of historical index observations (nyears long, may be patchy with NAs) going back to unfished conditions.
#' @param Cobs A numeric value representing catch observation error as a log normal sd
#' @param sigmaR A numeric value representing the prior standard deviation of log space recruitment deviations
#' @param Umax A numeric value representing the maximum harvest rate for any age class (rejection of sims where this occurs)
#' @param nsim The number desired draws of parameters / effort trajectories
#' @param proyears The number of projected MSE years
#' @param Jump_fac A multiplier of the jumping distribution variance to increase acceptance (lower Jump_fac) or decrease acceptance rate (higher Jump_fac)
#' @param nits The number of MCMC iterations
#' @param burnin The number of initial MCMC iterations to discard
#' @param thin The interval over which MCMC samples are extracted for use in graphing / statistics
#' @param ESS Effective sample size - the weighting of the catch at age data
#' @param ploty Do you want to see diagnostics plotted?
#' @param nplot how many MCMC samples should be plotted in convergence plots?
#' @param SRAdir A directory where the SRA diagnostics / fit are stored
#' @return A list with three positions. Position 1 is the filled OM object, position 2 is the custompars data.frame that may be submitted as an argument to runMSE() and position 3 is the matrix of effort histories `[nyears x nsim]` vector of objects of class\code{classy}
#' @author T. Carruthers (Canadian DFO grant)
#' @references Walters, C.J., Martell, S.J.D., Korman, J. 2006. A stochastic approach to stock reduction analysis. Can. J. Fish. Aqua. Sci. 63:212-213.
#' @examples
#' \dontrun{
#' setup()
#' sim<-SRAsim(testOM,patchy=0.8)
#' CAA<-sim$CAA
#' Chist<-sim$Chist
#' testOM<-StochasticSRA(testOM,CAA,Chist,nsim=30,nits=1000)
#' runMSE(testOM)
#' }
StochasticSRAcpp <-function(OM,CAA,Chist,Ind,Cobs=0.1,sigmaR=0.5,Umax=0.9,nsim=48,proyears=50,
Jump_fac=1,nits=20000,
burnin=1000,thin=50,ESS=300,ploty=T,nplot=6,SRAdir=NA){
# Condition an operating model by stochastic SRA (compiled implementation):
# per simulation, MCMC samples of log(R0), logistic selectivity parameters
# (inflection / slope) and annual recruitment deviations are drawn by
# LSRA_MCMC_sim() given catch-at-age (CAA) and a historical catch series
# (Chist). The posterior samples are written into OM@cpars.
# NOTE(review): the Ind argument is accepted but the index-likelihood calls
# below are commented out, so Ind is currently unused - confirm intent.
OM <- updateMSE(OM) # Check that all required slots in OM object contain values
# --- dimensions and input checks ---
nyears<-length(Chist)
if(class(Chist)=="matrix")nyears<-nrow(Chist)
maxage<-OM@maxage
if (OM@nyears != nyears) {
message("OM@nyears being updated to length Chist: ", nyears)
OM@nyears <- nyears
}
if (dim(CAA)[1] != nyears) stop("Number of CAA rows (", dim(CAA)[1], ") does not equal nyears (", nyears, "). NAs are acceptable")
if (dim(CAA)[2] != OM@maxage) {
# Pad missing older age classes with zero observations
message("Number of CAA columns (", dim(CAA)[2], ") does not equal OM@maxage (", OM@maxage, ")")
message("Assuming no CAA for ages greater than ", dim(CAA)[2], ' and filling with 0s')
addages <- OM@maxage-dim(CAA)[2]
CAA2 <- matrix(0, nrow=nrow(CAA), ncol=addages)
CAA <- cbind(CAA, CAA2)
}
# Enforce a minimum burn-in of 5% of iterations
if (burnin < 0.05*nits) burnin <- 0.05 * nits
# nsim / proyears slots on the OM take precedence over the arguments
if("nsim"%in%slotNames(OM))nsim<-OM@nsim
if("proyears"%in%slotNames(OM))proyears<-OM@proyears
OM@nsim<-nsim
OM@proyears<-proyears
# Sample custom parameters
SampCpars <- list() # empty list
# custom parameters exist - sample and write to list
if(length(OM@cpars)>0){
# ncparsim<-cparscheck(OM@cpars)   # check each list object has the same length and if not stop and error report
SampCpars <- SampleCpars(OM@cpars, nsim)
}
# Sample Stock Parameters
options(warn=-1)
StockPars <- SampleStockPars(OM, nsim, nyears, proyears, SampCpars)
options(warn=1)
# Assign Stock pars to function environment
for (X in 1:length(StockPars)) assign(names(StockPars)[X], StockPars[[X]])
agearr<-array(rep(1:maxage,each=nsim),c(nsim,maxage))
Wt_age <- Wt_age[,,nyears] # no time-varying growth
Mat_age<- Mat_age[,,nyears]
# Sample Fleet Parameters
options(warn=-1)
FleetPars <- SampleFleetPars(SubOM(OM, "Fleet"), Stock=StockPars, nsim,
nyears, proyears, cpars=SampCpars)
options(warn=1)
# Assign Fleet pars to function environment
for (X in 1:length(FleetPars)) assign(names(FleetPars)[X], FleetPars[[X]])
# Sample historical catch (lognormal observation error Cobs around Chist)
Chist_a<-array(trlnorm(nyears*nsim,1,Cobs)*rep(Chist,each=nsim),c(nsim,nyears)) # Historical catch
# set up mcmc
lnR0<-lninfl<-lnslp<-array(NA,c(nsim,nits))
lnRD<-array(0,c(nsim,nyears+maxage,nits))
LHD<-array(NA,c(nsim,nits))
# Bracket R0 per simulation by SRA fits at high (4M) and low (M/10) F levels
if(sfIsRunning()){
R0LB<-sfSapply(1:nsim,LSRA_cpp,FF=M*4,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
R0UB<-sfSapply(1:nsim,LSRA_cpp,FF=M/10,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
}else{
R0LB <- sapply(1:nsim,LSRA_cpp,FF=M*4,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
R0UB <- sapply(1:nsim,LSRA_cpp,FF=M/10,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
}
# Parameter bounds: log(R0), log selectivity inflection / slope, rec. devs
R0b <- cbind(R0LB-1,R0UB+1)
inflb<-log(c(0.5,maxage*0.5))
slpb<-log(exp(inflb)*c(0.1,2))#c(-3,3)
RDb<-c(-2,2)
# initial guesses
lnR0[,1]<-R0UB#log(apply(Chist_a,1,mean))
lninfl[,1]<-log(maxage/4)
lnslp[,1]<-log(exp(lninfl[,1])*0.2)
lnRD[,,1]<-0
# parameters: one row per simulation [lnR0, lninfl, lnslp, lnRD...]
pars <- cbind(lnR0[,1], lninfl[,1], lnslp[,1], lnRD[,,1])
npars<-ncol(pars)
# parameter store
LHstr<-array(NA,c(nsim,nits))
parstr<-array(NA,c(nsim, npars,nits))
# parameter indexes (columns of pars)
R0ind <-1
inflind <- 2
slpind <- 3
RDind <- 4:npars
# Parameter jumping (per-parameter proposal SDs for the MCMC)
JumpCV<-rep(0.05,npars) # R0
JumpCV[inflind]<-0.05
JumpCV[slpind]<-0.05
JumpCV[RDind]<-0.1*sigmaR # a function of sigmaR to provide reasonable acceptance rate
JumpCV<-JumpCV*Jump_fac
# parameter censorship (per-simulation lower/upper proposal bounds)
parLB<-parUB<-matrix(NA, nsim, npars)
parLB[,1]<-R0b[,1]
parLB[,2]<-inflb[1]
parLB[,3]<-slpb[1]
parLB[,4:npars]<-RDb[1]
parUB[,1]<-R0b[,2]
parUB[,2]<-inflb[2]
parUB[,3]<-slpb[2]
parUB[,4:npars]<-RDb[2]
CAAadj=sum(CAA,na.rm=T)/ESS # ESS adjustment to low sample sizes
# update<-(1:50)*(nits/50)
# Proposal-scale multiplier: wide early jumps, standard scale after 200 its
adapt<-c(rep(5,100),rep(2.5,100),rep(1,nits-200))
message("Running MCMC (may take a while!)")
# Run the compiled MCMC per simulation (parallel via snowfall if running);
# note the 0-based index adjustment (R0ind-1 etc.) for the cpp interface
if (snowfall::sfIsRunning()) {
mcmc <- snowfall::sfSapply(1:nsim, function(sim) {
#LSRA_MCMC_sim(nits=nits, pars[sim,], JumpCV, adapt, parLB[sim,], parUB[sim,], R0ind-1,
#              inflind-1, slpind-1, RDind-1, nyears, maxage, M[sim], Mat_age[sim,],
#              Wt_age[sim,], Chist_a[sim,], Ind, Umax, hs[sim], CAA, CAAadj, sigmaR, sigmaI)
LSRA_MCMC_sim(nits=nits, pars[sim,], JumpCV, adapt, parLB[sim,], parUB[sim,], R0ind-1,
inflind-1, slpind-1, RDind-1, nyears, maxage, M[sim], Mat_age[sim,],
Wt_age[sim,], Chist_a[sim,], Umax, hs[sim], CAA, CAAadj, sigmaR)
})
}else {
mcmc <- sapply(1:nsim, function(sim) {
cat(".")
# LSRA_MCMC_sim(nits=nits, pars[sim,], JumpCV, adapt, parLB[sim,], parUB[sim,], R0ind-1,
#               inflind-1, slpind-1, RDind-1, nyears, maxage, M[sim], Mat_age[sim,],
#               Wt_age[sim,], Chist_a[sim,], Ind, Umax, hs[sim], CAA, CAAadj, sigmaR, sigmaI)
LSRA_MCMC_sim(nits=nits, pars[sim,], JumpCV, adapt, parLB[sim,], parUB[sim,], R0ind-1,
inflind-1, slpind-1, RDind-1, nyears, maxage, M[sim], Mat_age[sim,],
Wt_age[sim,], Chist_a[sim,], Umax, hs[sim], CAA, CAAadj, sigmaR)
})
}
# Unpack the per-simulation list outputs into arrays [sim x ...]
parstr <- aperm(array(unlist(mcmc[1,]), dim=c(npars, nits, nsim)), c(3,1,2))
CAA_pred <- aperm(array(unlist(mcmc[2,]), dim=c(nyears, maxage, nsim)), c(3,1,2))
SSB <- aperm(array(unlist(mcmc[3,]), dim=c(nyears, nsim)), c(2,1))
SSB0 <- unlist(mcmc[4,])
RD <- aperm(array(unlist(mcmc[5,]), dim=c(nyears+maxage, nsim)), c(2,1))
PredF <- aperm(array(unlist(mcmc[6,]), dim=c(nyears, nsim)), c(2,1))
sel <- aperm(array(unlist(mcmc[7,]), dim=c(maxage, nsim)), c(2,1))
# --- convergence diagnostics: traces (thinned) and posterior densities ---
if(!is.na(SRAdir))jpeg(paste0(SRAdir,"/SRA_convergence.jpg"),width=7,height=9,units='in',res=400)
if(ploty){
col<-rep(c("blue","red","green","orange","grey","brown","pink","yellow","dark red","dark blue","dark green"),100)
par(mfcol=c(5,2),mai=c(0.7,0.6,0.05,0.1))
pind<-(1:(nits/thin))*thin
matplot(pind,t(parstr[1:nplot,1,pind]),type='l',ylab="log R0",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[(1:nplot),2, pind]),type='l',ylab="log infl (sel)",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[(1:nplot),3,pind]),type='l',ylab="log slp (sel)",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[(1:nplot),4, pind]),type='l',ylab="recdev1",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[(1:nplot), npars, pind]),type='l',ylab="recdev2",xlab="Iteration")
abline(v=burnin,lty=2)
burn<-burnin:nits
plot(density(parstr[, 1,burn],adj=0.7),xlab="log(R0)",main="")
plot(density(parstr[, 2,burn],adj=0.7),xlab="inflection selectivity",main="")
plot(density(parstr[, 3,burn],adj=0.7),xlab="slope selectivity",main="")
plot(density(parstr[, 4, burn],adj=0.7),xlab="recdev1",main="")
plot(density(parstr[, npars, burn],adj=0.7),xlab="recdev2",main="")
}
if(!is.na(SRAdir))dev.off()
# --- posterior predictions: trajectories, selectivity and CAA fit ---
if(!is.na(SRAdir))jpeg(paste0(SRAdir,"/SRA predictions.jpg"),width=7,height=11,units='in',res=400)
if(ploty){
par(mfrow=c(6,2),mai=c(0.65,0.6,0.02,0.1))
# SSB: individual samples (left) and 50/90% intervals (right)
qq<-apply(SSB,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
matplot(t(SSB[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="SSB")
xs<-dim(SSB)[2]
plot(qq[3,],ylim=ylim,type='l',xlab="Year",ylab="SSB")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
# Depletion
D<-SSB/SSB0
qq<-apply(D,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
matplot(t(D[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="Depletion")
plot(qq[3,],ylim=ylim,type='l',xlab="Year",ylab="Depletion")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
# Fishing mortality
qq<-apply(PredF,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
matplot(t(PredF[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="Fish. Mort.")
plot(qq[3,],ylim=ylim,type='l',xlab="Year",ylab="Fish. Mort.")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
# Selectivity-at-age
nyears<-dim(CAA)[1]
nages<-dim(CAA)[2]
qq<-apply(sel,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
xs<-maxage
matplot(t(sel[1:nplot,]),ylim=ylim,type="l",xlab="Age",ylab="Selectivity")
plot(qq[3,],ylim=ylim,type='l',xlab="Age",ylab="Selectivity")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
# Recruitment deviations (x-axis spans the maxage initial cohorts too)
RDx<-(-maxage+1):nyears
qq<-apply(RD,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
xs<-dim(RD)[2]
matplot(RDx,t(RD[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="Rec. Dev.")
plot(RDx,qq[3,],ylim=ylim,type='l',xlab="Year",ylab="Rec. Dev.")
polygon(c(RDx,RDx[xs:1]),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(RDx,RDx[xs:1]),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(RDx,qq[3,],lwd=1,col="white")
# Observed vs predicted catch-at-age bubbles
plot(c(1,nyears),c(1,nages),col='white',xlab="Year",ylab="Age")
legend("top",legend="Observed composition data",bty='n')
points(rep(1:nyears,nages),rep(1:nages,each=nyears),cex=CAA^0.5/max(CAA^0.5,na.rm=T)*1.5,pch=19,col=makeTransparent("dark grey",60))
CAA_pred1<-CAA_pred
CAA_pred1[CAA_pred1<0.002]<-NA
plot(c(1,dim(CAA)[1]),c(1,dim(CAA)[2]),col='white',xlab="Year",ylab="Age")
legend("top",legend="Predicted composition data (1 sim)",bty='n')
points(rep(1:nyears,nages),rep(1:nages,each=nyears),cex=CAA_pred1[1,,]^0.5/max(CAA_pred1[1,,]^0.5,na.rm=T)*1.5,pch=19,col=makeTransparent("dark grey",60))
}
if(!is.na(SRAdir))dev.off()
# --- summarize posterior samples into OM slots ---
dep<-SSB[,nyears]/SSB0
procsd<-apply(RD,1,sd,na.rm=T)
procmu <- -0.5 * (procsd)^2 # adjusted log normal mean
OM@D<-quantile(dep,c(0.05,0.95))
OM@Perr<-quantile(procsd,c(0.025,0.975))
# Lag-1 autocorrelation of recruitment deviations per simulation
getAC<-function(recdev)acf(recdev,plot=F)$acf[2,1,1]
AC<-apply(RD,1,getAC)
OM@AC<-quantile(AC,c(0.05,0.95))
# Final-iteration parameter values (one per simulation)
R0 <- exp(parstr[,1,nits])
slp <- exp(parstr[,3,nits])
infl <- exp(parstr[,2,nits])
# Convert logistic age selectivity to lengths at 5% / 95% via growth curve
A5<--(slp*log(1/0.05-1)-infl)
A5[A5 < 0] <- 0
A95<--(slp*log(1/0.95-1)-infl)
L5<-Linf*(1-exp(-K*(A5-t0)))
L95<-Linf*(1-exp(-K*(A95-t0)))
OM@L5<-quantile(L5,c(0.05,0.95))
OM@LFS<-quantile(L95,c(0.05,0.95))
OM@nyears<-nyears
OM@EffYears<-1:OM@nyears
OM@EffLower<-apply(PredF,2,quantile,p=0.05)
OM@EffUpper<-apply(PredF,2,quantile,p=0.95)
OM@nyears<-nyears
# Process-error array: historical deviations + AC-correlated projection draws
Perr<-array(NA,c(nsim,maxage+nyears+proyears-1))
Perr[,1:(nyears+maxage-1)]<-log(RD[,2:(maxage+nyears)])
Perr[,(nyears+maxage):(nyears+maxage+proyears-1)]<-matrix(rnorm(nsim*(proyears),rep(procmu,proyears),rep(procsd,proyears)),nrow=nsim)
for (y in (maxage+nyears):(nyears + proyears+maxage-1)) Perr[, y] <- AC * Perr[, y - 1] + Perr[, y] * (1 - AC * AC)^0.5
Perr<-exp(Perr)
PredF<-PredF/apply(PredF,1,mean) # Find should be mean 1 so qs optimizers are standardized
# Replicate non-time-varying biology over [nsim x maxage x nyears+proyears]
Wt_age <- array(Wt_age, dim=c(dim=c(nsim, maxage, nyears+proyears)))
Len_age <- array(Len_age, dim=c(nsim, maxage, nyears+proyears))
Marray <- matrix(M, nrow=nsim, ncol=proyears+nyears)
OM@cpars<-list(dep=dep,M=M,procsd=procsd,AC=AC,hs=hs,Linf=Linf,
Wt_age=Wt_age, Len_age=Len_age, Marray=Marray,
K=K,t0=t0,L50=L50,
L5=L5,LFS=L95,Find=PredF,
V=array(sel,c(nsim,maxage,nyears)),Perr=Perr,R0=R0,
SSB=SSB,SSB0=SSB0,RD=RD) # not valid for runMSE code but required
OM
}
#' Stochastic SRA construction of operating models
#'
#' @description Specify an operating model, using catch composition data and a historical catch series. Returns and operating model with depletion (D), selectivity parameters (L5, LFS) and effort trajectory (Effyears, EffLower, EffUpper) filled.
#' @param OM An operating model object with M, growth, stock-recruitment and maturity parameters specified.
#' @param CAA A matrix nyears (rows) by nages (columns) of catch at age (age 1 to maxage in length)
#' @param Chist A vector of historical catch observations (nyears long) going back to unfished conditions
#' @param Ind A vector of historical abundance index observations (assumed proportional to SSB)
#' @param ML A vector of historical mean length (in catch) observations
#' @param CAL A matrix of nyears (row) by n length bins (columns) of catch at length samples
#' @param mulen A vector mean length by length bin, a vector the same as the number of columns of CAL
#' @param wts A vector of relative weights for the likelihood functions of CAA, Chist, Ind, ML and CAL
#' @param Jump_fac A multiplier of the jumping distribution variance to increase acceptance (lower Jump_fac) or decrease acceptance rate (higher Jump_fac)
#' @param nits The number of MCMC iterations
#' @param burnin The number of initial MCMC iterations to discard
#' @param thin The interval over which MCMC samples are extracted for use in graphing / statistics
#' @param ESS Effective sample size - the weighting of the catch at age data
#' @param MLsd The lognormal sd of the mean length observations
#' @param ploty Do you want to see diagnostics plotted?
#' @param nplot how many MCMC samples should be plotted in convergence plots?
#' @param SRAdir A directory where the SRA diagnostics / fit are stored
#' @return A list with three positions. Position 1 is the filled OM object, position 2 is the custompars data.frame that may be submitted as an argument to runMSE() and position 3 is the matrix of effort histories `[nyears x nsim]` vector of objects of class\code{classy}
#' @author T. Carruthers (Canadian DFO grant)
#' @references Walters, C.J., Martell, S.J.D., Korman, J. 2006. A stochastic approach to stock reduction analysis. Can. J. Fish. Aqua. Sci. 63:212-213.
#' @export StochasticSRA
#' @examples
#' \dontrun{
#' setup()
#' sim<-SRAsim(testOM,patchy=0.8)
#' CAA<-sim$CAA
#' Chist<-sim$Chist
#' testOM<-StochasticSRA(testOM,CAA,Chist,nsim=30,nits=1000)
#' runMSE(testOM)
#' }
StochasticSRA<-function(OM,CAA,Chist,Ind=NA,ML=NA,CAL=NA,mulen=NA,wts=c(1,1,0.5,0.1,1),
Jump_fac=1,nits=4000, burnin=500,thin=10,ESS=300,MLsd=0.1,
ploty=T,nplot=6,SRAdir=NA){
OM <- updateMSE(OM) # Check that all required slots in OM object contain values
nyears<-length(Chist)
if(class(Chist)=="matrix")nyears<-nrow(Chist)
maxage<-OM@maxage
if(length(Ind)==1){
Ind<-rep(NA,nyears)
}else{
if(sum(is.na(Ind))<nyears)Ind<-Ind/mean(Ind,na.rm=T) # normalize Ind to mean 1
}
if(length(ML)==1)ML<-rep(NA,nyears)
Umax<-1-exp(-OM@maxF) # get SRA umax from OM
Imiss<-is.na(Ind) # which SSB index observations are missing?
proyears<-OM@proyears
nsim<-OM@nsim
Cobs<-runif(nsim,OM@Cobs[1],OM@Cobs[2]) # sample observation error
Iobs=runif(nsim,OM@Iobs[1],OM@Iobs[2]) # use the OM obs error for index
if (OM@nyears != nyears) {
message("OM@nyears being updated to length Chist: ", nyears)
OM@nyears <- nyears
}
if(sum(is.na(Chist))>0){
message("One or more of the historical annual catch observations is missing. Linear interpolation has been used to fill these data")
Chistold<-Chist
Chist<-approx(Chist)$y
cond<-!is.na(Chistold)
Chist[(1:nyears)[cond]]<-Chistold[(1:nyears)[cond]]
print(data.frame("Catches entered" = Chistold, "Catches interpolated"=Chist))
}
if (dim(CAA)[1] != nyears) stop("Number of CAA rows (", dim(CAA)[1], ") does not equal nyears (", nyears, "). NAs are acceptable")
if (dim(CAA)[2] != OM@maxage) {
message("Number of CAA columns (", dim(CAA)[2], ") does not equal OM@maxage (", OM@maxage, ")")
message("Assuming no CAA for ages greater than ", dim(CAA)[2], ' and filling with 0s')
addages <- OM@maxage-dim(CAA)[2]
CAA2 <- matrix(0, nrow=nrow(CAA), ncol=addages)
CAA <- cbind(CAA, CAA2)
}
if(length(as.vector(CAL))==1){
CALswitch=F # don't do CAL calcs
CALLH<-0 # likelihood contribution is nil
}else{
if (dim(CAL)[1] != nyears) stop("Number of CAL rows (", dim(CAL)[1], ") does not equal nyears (", nyears, "). NAs are acceptable")
if(is.na(mulen[1])) stop("You must specify the argument mulen, which is the mean length of each length bin (columns) of the CAL data")
if (dim(CAL)[2] != length(mulen)) {
stop("The argument mulen (the mean length of each length bin) should be of the same length as the number of columns of the CAL data")
}
CALyrs<-(1:nrow(CAL))[apply(CAL,1,function(x)sum(is.na(x)))<ncol(CAL)]
CALswitch=T
}
nlen<-length(mulen)
if (burnin < 0.05*nits) burnin <- 0.05 * nits
if("nsim"%in%slotNames(OM))nsim<-OM@nsim
if("proyears"%in%slotNames(OM))proyears<-OM@proyears
OM@nsim<-nsim
OM@proyears<-proyears
# Sample custom parameters
SampCpars <- list() # empty list
# custom parameters exist - sample and write to list
if(length(OM@cpars)>0){
# ncparsim<-cparscheck(OM@cpars) # check each list object has the same length and if not stop and error report
SampCpars <- SampleCpars(OM@cpars, nsim)
}
# Sample Stock Parameters
options(warn=-1)
StockPars <- SampleStockPars(OM, nsim, nyears, proyears, SampCpars, msg=FALSE)
options(warn=1)
# Assign Stock pars to function environment
for (X in 1:length(StockPars)) assign(names(StockPars)[X], StockPars[[X]])
agearr<-array(rep(1:maxage,each=nsim),c(nsim,maxage))
Wt_age <- Wt_age[,,nyears] # no time-varying growth
Mat_age<- Mat_age[,,nyears]
Len_age<-Len_age[,,nyears]
# iALK script =================================================
if(CALswitch){
lvar<-runif(nsim,OM@LenCV[1],OM@LenCV[2])
iALK<-array(NA,c(nsim,maxage,nlen))
ind<-as.matrix(expand.grid(1:nsim,1:maxage,1:nlen))
Lind<-ind[,c(1,2)]
iALK[ind]<-dnorm(mulen[ind[,3]],Len_age[Lind],lvar[ind[,1]]*Len_age[Lind])
sums<-apply(iALK,1:2,sum)
sind<-ind[,1:2]
iALK<-iALK/sums[sind]
#contour(x=1:maxage,y=1:nlen,iALK[3,,],nlevels=10)
}
# Sample Fleet Parameters
options(warn=-1)
FleetPars <- SampleFleetPars(SubOM(OM, "Fleet"), Stock=StockPars, nsim,
nyears, proyears, cpars=SampCpars)
options(warn=1)
# Assign Fleet pars to function environment
for (X in 1:length(FleetPars)) assign(names(FleetPars)[X], FleetPars[[X]])
# Sampled arrays
Chist_a<-array(trlnorm(nyears*nsim,1,Cobs)*rep(Chist,each=nsim),c(nsim,nyears)) # Historical catch
# set up mcmc
lnR0<-lninfl<-lnslp<-array(NA,c(nsim,nits))
lnRD<-array(0,c(nsim,nyears+maxage,nits))
LHD<-array(NA,c(nsim,nits))
# if(sfIsRunning())sfExport(list=c("Chist_a"))
if(sfIsRunning()){
R0LB<-sfSapply(1:nsim,LSRA,FF=M*4,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
R0UB<-sfSapply(1:nsim,LSRA,FF=M/10,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
}else{
R0LB<-sapply(1:nsim,LSRA,FF=M*4,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
R0UB<-sapply(1:nsim,LSRA,FF=M/10,Chist_arr=Chist_a,M=M,Mat_age=Mat_age,Wt_age=Wt_age,
sel=Mat_age,Recdevs=array(1,c(nsim,nyears+maxage)),h=hs)
}
R0b=cbind(R0LB-1,R0UB+1)
inflb<-log(c(0.5,maxage*0.5))
slpb<-log(exp(inflb)*c(0.1,2))#c(-3,3)
RDb<-c(-2,2)
# initial guesses
lnR0[,1]<-R0UB#log(apply(Chist_a,1,mean))
lninfl[,1]<-log(maxage/4)
lnslp[,1]<-log(exp(lninfl[,1])*0.2)
lnRD[,,1]<-0
# parameter vector
pars<-c(lnR0[,1],lninfl[,1],lnslp[,1],lnRD[,,1])
npars<-length(pars)
# parameter store
LHstr<-array(NA,c(nsim,nits))
parstr<-array(NA,c(npars,nits))
# parameter indexes
R0ind<-1:nsim
inflind<-(1*nsim)+(1:nsim)
slpind<-(2*nsim)+(1:nsim)
RDind<-(3*nsim+1):length(pars)
# Parameter jumping
JumpCV<-rep(0.05,npars) # R0
JumpCV[inflind]<-0.05
JumpCV[slpind]<-0.05
JumpCV[RDind]<-0.1*mean(procsd) # a function of sigmaR to provide reasonable acceptance rate
JumpCV<-JumpCV*Jump_fac
# parameter censorship
parLB<-parUB<-rep(NA,length(pars))
parLB[R0ind]<-R0b[,1]
parLB[inflind]<-inflb[1]
parLB[slpind]<-slpb[1]
parLB[RDind]<-RDb[1]
parUB[R0ind]<-R0b[,2]
parUB[inflind]<-inflb[2]
parUB[slpind]<-slpb[2]
parUB[RDind]<-RDb[2]
CAAadj=sum(CAA,na.rm=T)/ESS # ESS adjustment to low sample sizes
CALadj=sum(CAL,na.rm=T)/ESS # ESS adjustment to low sample sizes
update<-(1:50)*(nits/50)
adapt<-c(rep(5,100),rep(2.5,100),rep(1,nits-200))
CAA_pred<-array(NA,c(nsim,nyears,maxage))
if(CALswitch){
CAL_pred<-array(NA,c(nsim,nyears,nlen))
CALtemp<-array(NA,c(nsim,maxage,nlen))
CAAind<-cbind(ind[,1],rep(1,nsim*maxage),ind[,2]) # sim, year, age
}
PredF<-MLpred<-SSB<-array(NA,c(nsim,nyears))
for(i in 1:nits){
if(i %in% update){
cat(".")
flush.console()
}
#i<-i+1# debugging
Reject<-rep(FALSE,nsim)
nupars<-rnorm(npars,pars,JumpCV*adapt[i])
nupars[nupars<parLB]<-parLB[nupars<parLB]
nupars[nupars>parUB]<-parUB[nupars>parUB]
if(i==1)nupars=pars
R0<-exp(nupars[R0ind])
infl<-exp(nupars[inflind])
#infl<-(0.05+(infl/(1+infl))*0.45)*maxage
slp<-exp(nupars[slpind])
#slp<-(0.02+(slp/(1+slp))*0.98)*infl
RD<-exp(array(nupars[RDind],c(nsim,nyears+maxage)))
RD<-RD/apply(RD,1,mean)
sel<-1/(1+exp((infl-(agearr))/slp))
# calcs (getting pen as a zero or a 1)
N<-R0*exp(-M*(agearr-1))
SSB0<-apply(N*Mat_age*Wt_age,1,sum)
SSBpR<-SSB0/R0
CAA_pred[]<-NA
if(CALswitch)CAL_pred[]<-0
PredF[]<-NA
MLpred[]<-NA
SSB[]<-NA
for(y in 1:nyears){ # M - F - aging / recruitment
if(y==1)N<-N*RD[,maxage:1]
SSB[,y]<-apply(N*Mat_age*Wt_age,1,sum)
PredN<-N*exp(-M/2)
PredVN<-PredN*sel
CAA_pred[,y,]<-PredVN/apply(PredVN,1,sum)
if(CALswitch){
if(y%in%CALyrs){
CAAind[,2]<-y
CALtemp[ind]<-iALK[ind]*CAA_pred[CAAind]
CAL_pred[,y,]<-CAL_pred[,y,]+apply(CALtemp,c(1,3),sum)
}
}
MLpred[,y]<-apply(CAA_pred[,y,]*Len_age,1,sum)/apply(CAA_pred[,y,],1,sum)
PredVW<-PredVN*Wt_age # Predicted vulnerable weight
Predfrac<-PredVW/apply(PredVW,1,sum) # Catch weight distribution over ages
Cat<-(Chist_a[,y]*Predfrac)/Wt_age # Guess at catch numbers by age
predU<-Cat/PredN # Which means this harvest rate
predU[!is.finite(predU)] <- Inf
cond<-predU>Umax # Check max U
Reject[apply(cond,1,sum)>0]<-TRUE # Reject sims where U > Umax for any age class
Cat[cond]<-Cat[cond]/(predU[cond]/Umax) # Set catch to Umax
PredF[,y]<--log(1-apply(Cat/PredN,1,max)) # apical F
N<-N*exp(-M)-Cat #PredF[,y]*sel)
N[,2:maxage]<-N[,1:(maxage-1)] # aging
N[,1]<-RD[,maxage+y]*(0.8*R0*hs*SSB[,y])/(0.2*SSBpR*R0*(1-hs)+(hs-0.2)*SSB[,y])
N[N<0] <- tiny
}
Ipred<-SSB
Ipred[matrix(rep(Imiss,each=nsim),nrow=nsim)]<-NA
Ipred<-Ipred/apply(Ipred,1,mean,na.rm=T)
Ires<-Ipred/matrix(rep(Ind,each=nsim),nrow=nsim)
MLres<-MLpred/matrix(rep(ML,each=nsim),nrow=nsim)
Ires[Ires<(1E-10)]<-(1E-10)
Ires[Ires>1E10]<-1E10
MLres[MLres<(1E-10)]<-(1E-10)
MLres[MLres>1E10]<-1E10
CAA_pred[CAA_pred<1E-15]<-1E-15
if(CALswitch){
CAL_pred<-CAL_pred/array(apply(CAL_pred,1:2,sum),dim(CAL_pred))
CAL_pred[CAL_pred<1E-15]<-1E-15
}
CAALH<-apply(log(CAA_pred)*
array(rep(CAA,each=nsim)/CAAadj,c(nsim,nyears,maxage)),
1,sum,na.rm=T)
if(CALswitch){
CALLH<-apply(log(CAL_pred[,CALyrs,])*
array(rep(CAL[CALyrs,],each=nsim)/CALadj,c(nsim,length(CALyrs),nlen)),
1,sum,na.rm=T)
}
RDLH<-apply(matrix(dnorm(nupars[RDind],-(procsd^2)/2,procsd,log=T),nrow=nsim),1,sum)
ILH<-apply(dnorm(log(Ires),-(Iobs^2)/2,Iobs,log=T),1,sum,na.rm=T)
MLLH<-apply(dnorm(log(MLres),-(MLsd^2)/2,MLsd,log=T),1,sum,na.rm=T)
LH<-wts[1]*CAALH+wts[2]*RDLH+wts[3]*ILH+wts[4]*MLLH+wts[5]*CALLH
# Reject / accept (cond)
if(i > 1){
Accept<-runif(nsim)<exp(LH-LHstr[,i-1])
Accept[Reject]<-FALSE
LHstr[,i]<-LHstr[,i-1]
LHstr[Accept,i]<-LH[Accept]
Aind<-rep(Accept,npars/nsim) # The correct index in the pars vector for the accepted simulations
parstr[,i]<-parstr[,i-1]
parstr[Aind,i]<-nupars[Aind]
pars<-parstr[,i]
# print(rbind(Reject,Accept))
}else{
parstr[,i]<-pars
LHstr[,i]<-LH
}
} # End of MCMC
if(!is.na(SRAdir))jpeg(paste0(SRAdir,"/SRA_convergence.jpg"),width=7,height=9,units='in',res=400)
if(ploty){
col<-rep(c("blue","red","green","orange","grey","brown","pink","yellow","dark red","dark blue","dark green"),100)
par(mfcol=c(5,2),mai=c(0.7,0.6,0.05,0.1))
pind<-(1:(nits/thin))*thin
matplot(pind,t(parstr[1:nplot,pind]),type='l',ylab="log R0",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[nsim+(1:nplot),pind]),type='l',ylab="log infl (sel)",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[(nsim*2)+(1:nplot),pind]),type='l',ylab="log slp (sel)",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[(nsim*30)+(1:nplot),pind]),type='l',ylab="recdev1",xlab="Iteration")
abline(v=burnin,lty=2)
matplot(pind,t(parstr[(nsim*40)+(1:nplot),pind]),type='l',ylab="recdev2",xlab="Iteration")
abline(v=burnin,lty=2)
burn<-burnin:nits
plot(density(parstr[1:nsim,burn],adj=0.7),xlab="log(R0)",main="")
plot(density(parstr[nsim+(1:nsim),burn],adj=0.7),xlab="inflection selectivity",main="")
plot(density(parstr[(nsim*2)+(1:nsim),burn],adj=0.7),xlab="slope selectivity",main="")
plot(density(parstr[(nsim*30)+(1:nsim),burn],adj=0.7),xlab="recdev1",main="")
plot(density(parstr[(nsim*40)+(1:nsim),burn],adj=0.7),xlab="recdev2",main="")
}
if(!is.na(SRAdir))dev.off()
if(!is.na(SRAdir))jpeg(paste0(SRAdir,"/SRA predictions.jpg"),width=7,height=11,units='in',res=400)
if(ploty){
par(mfrow=c(6,2),mai=c(0.65,0.6,0.02,0.1))
qq<-apply(SSB,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
matplot(t(SSB[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="SSB")
xs<-dim(SSB)[2]
plot(qq[3,],ylim=ylim,type='l',xlab="Year",ylab="SSB")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
D<-SSB/SSB0
qq<-apply(D,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
matplot(t(D[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="Depletion")
plot(qq[3,],ylim=ylim,type='l',xlab="Year",ylab="Depletion")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
qq<-apply(PredF,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
matplot(t(PredF[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="Fish. Mort.")
plot(qq[3,],ylim=ylim,type='l',xlab="Year",ylab="Fish. Mort.")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
nyears<-dim(CAA)[1]
nages<-dim(CAA)[2]
qq<-apply(sel,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
xs<-maxage
matplot(t(sel[1:nplot,]),ylim=ylim,type="l",xlab="Age",ylab="Selectivity")
plot(qq[3,],ylim=ylim,type='l',xlab="Age",ylab="Selectivity")
polygon(c(1:xs,xs:1),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(1:xs,xs:1),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(qq[3,],lwd=1,col="white")
RDx<-(-maxage+1):nyears
qq<-apply(RD,2,quantile,p=c(0.05,0.25,0.5,0.75,0.95))
ylim<-c(0,max(qq))
xs<-dim(RD)[2]
matplot(RDx,t(RD[1:nplot,]),ylim=ylim,type="l",xlab="Year",ylab="Rec. Dev.")
plot(RDx,qq[3,],ylim=ylim,type='l',xlab="Year",ylab="Rec. Dev.")
polygon(c(RDx,RDx[xs:1]),c(qq[1,],qq[5,xs:1]),border=NA,col='light grey')
polygon(c(RDx,RDx[xs:1]),c(qq[2,],qq[4,xs:1]),border=NA,col='dark grey')
lines(RDx,qq[3,],lwd=1,col="white")
plot(c(1,nyears),c(1,nages),col='white',xlab="Year",ylab="Age")
legend("top",legend="Observed composition data",bty='n')
points(rep(1:nyears,nages),rep(1:nages,each=nyears),cex=CAA^0.5/max(CAA^0.5,na.rm=T)*1.5,pch=19,col=makeTransparent("dark grey",60))
CAA_pred1<-CAA_pred
CAA_pred1[CAA_pred1<0.002]<-NA
plot(c(1,dim(CAA)[1]),c(1,dim(CAA)[2]),col='white',xlab="Year",ylab="Age")
legend("top",legend="Predicted composition data (1 sim)",bty='n')
points(rep(1:nyears,nages),rep(1:nages,each=nyears),cex=CAA_pred1[1,,]^0.5/max(CAA_pred1[1,,]^0.5,na.rm=T)*1.5,pch=19,col=makeTransparent("dark grey",60))
}
if(!is.na(SRAdir))dev.off()
dep<-SSB[,nyears]/SSB0
procsd<-apply(RD,1,sd,na.rm=T)
procmu <- -0.5 * (procsd)^2 # adjusted log normal mean
OM@D<-quantile(dep,c(0.05,0.95))
OM@Perr<-quantile(procsd,c(0.025,0.975))
getAC<-function(recdev)acf(recdev,plot=F)$acf[2,1,1]
AC<-apply(RD,1,getAC)
OM@AC<-quantile(AC,c(0.05,0.95))
A5<--(slp*log(1/0.05-1)-infl)
A95<--(slp*log(1/0.95-1)-infl)
L5<-Linf*(1-exp(-K*(A5-t0)))
L95<-Linf*(1-exp(-K*(A95-t0)))
OM@L5<-quantile(L5,c(0.05,0.95))
OM@LFS<-quantile(L95,c(0.05,0.95))
OM@nyears<-nyears
OM@EffYears<-1:OM@nyears
OM@EffLower<-apply(PredF,2,quantile,p=0.05)
OM@EffUpper<-apply(PredF,2,quantile,p=0.95)
OM@nyears<-nyears
Perr<-array(NA,c(nsim,maxage+nyears+proyears-1))
Perr[,1:(nyears+maxage-1)]<-log(RD[,2:(maxage+nyears)])
Perr[,(nyears+maxage):(nyears+maxage+proyears-1)]<-matrix(rnorm(nsim*(proyears),rep(procmu,proyears),rep(procsd,proyears)),nrow=nsim)
for (y in (maxage+nyears):(nyears + proyears+maxage-1)) Perr[, y] <- AC * Perr[, y - 1] + Perr[, y] * (1 - AC * AC)^0.5
Perr<-exp(Perr)
PredF<-PredF/apply(PredF,1,mean) # Find should be mean 1 so qs optimizers are standardized
Wt_age <- array(Wt_age, dim=c(dim=c(nsim, maxage, nyears+proyears)))
Len_age <- array(Len_age, dim=c(nsim, maxage, nyears+proyears))
Marray <- matrix(M, nrow=nsim, ncol=proyears+nyears)
OM@cpars<-list(D=dep,M=M,procsd=procsd,AC=AC,hs=hs,Linf=Linf,
Wt_age=Wt_age, Len_age=Len_age, Marray=Marray,
K=K,t0=t0,L50=L50,
L5=L5,LFS=L95,Find=PredF,
V=array(sel,c(nsim,maxage,nyears)),Perr=Perr,R0=R0,
Iobs=apply(Ires,1,sd,na.rm=T),
SSB=SSB,SSB0=SSB0,RD=RD) # not valid for runMSE code but required
OM
}
#' Generic comparison plot for simulation testing of Stochastic SRA method
#'
#' @description Draws two companion panels comparing a simulated ("true") time
#' series with the matching estimates produced by \code{StochasticSRA()}: a
#' spaghetti plot of individual estimated series, then a quantile-envelope plot
#' (90\% and 50\% intervals with the median overlaid).
#' @param simy The simulated time series
#' @param samy The matrix of estimated time series from the StochasticSRA() function
#' @param xlab The x axis label for the plot
#' @param ylab The y axis label for the plot
#' @param maxplot The total number of individual simulations to be plotted in the first plot
#' @param type Should a line 'l' or points 'p' be plotted?
#' @return A plot
#' @author T. Carruthers (Canadian DFO grant)
#' @export compplot
#' @examples
#' nyears<-100
#' nsims<-200
#' simy<-sin(seq(0,2,length.out=nyears))
#' samy<-array(rep(simy,each=nsims)*rnorm(nsims,1,0.2)*rnorm(nsims*nyears,1,0.1),c(nsims,nyears))
#' par(mfrow=c(1,2))
#' compplot(simy,samy,xlab="Year",ylab="Some time varying parameter")
compplot <- function(simy, samy, xlab = "", ylab = "", maxplot = 10, type = "l") {
  line_cols <- rep(c("blue", "red", "green", "orange", "grey", "brown", "pink",
                     "yellow", "dark red", "dark blue", "dark green"), 100)
  n_series <- dim(samy)[1]
  n_steps <- dim(samy)[2]
  # Pointwise 5/25/50/75/95% quantiles of the estimates at each time step
  q_bands <- apply(samy, 2, quantile, p = c(0.05, 0.25, 0.5, 0.75, 0.95))
  y_range <- c(0, max(simy, q_bands))
  # Panel 1: a handful of individual estimated series drawn over the truth
  plot(simy, ylim = y_range, type = type, xlab = xlab, ylab = ylab)
  for (k in 1:min(n_series, maxplot)) {
    lines(samy[k, ], col = line_cols[k])
  }
  if (type == "l") lines(simy, lwd = 3)
  if (type == "p") points(simy, pch = 19)
  # Panel 2: quantile envelopes (light grey = 90% PI, dark grey = 50% PI)
  plot(simy, ylim = y_range, type = "l", xlab = xlab, ylab = ylab)
  polygon(c(1:n_steps, n_steps:1), c(q_bands[1, ], q_bands[5, n_steps:1]),
          border = NA, col = "light grey")
  polygon(c(1:n_steps, n_steps:1), c(q_bands[2, ], q_bands[4, n_steps:1]),
          border = NA, col = "dark grey")
  lines(q_bands[3, ], lwd = 1, col = "white")
  if (type == "l") lines(simy, lwd = 3)
  if (type == "p") points(simy, pch = 19)
}
#' Plot simulation test of Stochastic SRA method
#'
#' @description Plots simulated versus estimated quantities (spawning biomass,
#' depletion, recruitment deviations when estimated, and selectivity) for the
#' Stochastic SRA method of conditioning operating models.
#' @param sim The output list object of SRAsim() function.
#' @param OM The output object of StochasticSRA() function.
#' @param outfile The name of the figure (something.jpg) you wish to make using SRAcomp
#' @param maxplot The maximum number of simulations to plot
#' @author T. Carruthers (Canadian DFO grant)
#' @export SRAcomp
#' @examples
#' \dontrun{
#' sim<-SRAsim(testOM,qmult=1,patchy=0.8)
#' CAA<-sim$CAA
#' Chist<-sim$Chist
#' testOM<-StochasticSRA(testOM,CAA,Chist,nsim=30,nits=500)
#' SRAcomp(sim,testOM)
#' }
SRAcomp <- function(sim, OM, outfile = NA, maxplot = 10) {
  est <- OM@cpars
  if (!is.na(outfile)) {
    jpeg(outfile, width = 7, height = 9, units = "in", res = 400)
  }
  # Array dimensions of the conditioned operating model (kept for reference)
  nsim <- dim(est$SSB)[1]
  nyears <- dim(est$SSB)[2]
  maxage <- dim(sim$CAA)[2]
  # Recruitment deviations are only compared when process error was estimated
  PEest <- "Perr" %in% names(est)
  if (PEest) {
    par(mfrow = c(4, 2), mai = c(0.5, 0.7, 0.05, 0.05))
  } else {
    par(mfrow = c(3, 2), mai = c(0.5, 0.7, 0.05, 0.05))
  }
  # Spawning stock biomass
  compplot(sim$SSB, est$SSB, xlab = "Year", ylab = "SSB", maxplot = maxplot)
  # Depletion (SSB relative to unfished)
  compplot(sim$SSB / sim$SSB0, est$SSB / est$SSB0,
           xlab = "Year", ylab = "Depletion", maxplot = maxplot)
  # Recruitment deviations
  if (PEest) {
    compplot(sim$Recdevs, est$RD, xlab = "Year", ylab = "log recruitment",
             type = "l", maxplot = maxplot)
  }
  # Selectivity at age (first year of the conditioned array)
  compplot(sim$sel, est$V[, , 1], xlab = "Age", ylab = "Selectivity",
           type = "l", maxplot = maxplot)
  legend("bottomright",
         legend = c("Simulated", "Estimated 90% PI", "Estimated 50% PI"),
         bty = "n", text.col = c("black", "grey", "dark grey"),
         text.font = rep(2, 3))
  if (!is.na(outfile)) dev.off()
}
#' Estimates R0 using SRA to match current F estimates and avoid penalties for low stock sizes
#'
#' @param x a position in the various arrays and vectors that corresponds with a simulation (for use with sapply)
#' @param FF a vector of recent fishing mortality rates (apical Fs)
#' @param Chist_arr a matrix of historical catch observations `[nsim x nyears]`
#' @param M a vector of natural mortality rates `[nsim]`
#' @param Mat_age a matrix of maturity at age `[nsim x nage]`
#' @param Wt_age a matrix of weight at age `[nsim x nage]`
#' @param sel a matrix of selectivity at age `[nsim x nage]`
#' @param Recdevs a matrix of recruitment deviations `[nsim x nyears]`
#' @param h a vector of steepness values of the Bev-Holt Stock-Recruitment relationship
#' @return the optimized log unfished recruitment, log(R0), for simulation \code{x}
#' @export LSRA
#' @author T. Carruthers
LSRA <- function(x, FF, Chist_arr, M, Mat_age, Wt_age, sel, Recdevs, h) {
  n_ages <- ncol(Mat_age)
  # Bracket unfished spawning biomass between 5% and 100x the summed catches
  ssb0_bounds <- sum(Chist_arr[x, ]) * c(0.05, 100)
  # Unfished spawning biomass per recruit for this simulation
  spr0 <- sum(exp(-M[x] * (0:(n_ages - 1))) * Mat_age[x, ] * Wt_age[x, ])
  r0_bounds <- ssb0_bounds / spr0
  # Minimize the SRA objective (LSRA_opt mode 1) over log(R0) in the bracket
  fit <- optimize(LSRA_opt, interval = log(r0_bounds),
                  FF_a = FF[x],
                  Chist = Chist_arr[x, ],
                  M_a = M[x],
                  Mat_age_a = Mat_age[x, ],
                  Wt_age_a = Wt_age[x, ],
                  sel_a = sel[x, ],
                  Recdevs_a = Recdevs[x, ],
                  h_a = h[x])
  fit$minimum
}
#' Alternative version of LSRA that's a wrapper for LSRA_opt to return the right type of output (mode) using sapply
#'
#' @param x a position in the various arrays and vectors that corresponds with a simulation (for use with sapply)
#' @param lnR0s a vector nsim long of estimated log(R0) values
#' @param FF a vector of recent fishing mortality rates (apical Fs)
#' @param Chist a matrix of historical catch observations `[nsim x nyears]`
#' @param M a vector of natural mortality rates `[nsim]`
#' @param Mat_age a matrix of maturity at age `[nsim x nage]`
#' @param Wt_age a matrix of weight at age `[nsim x nage]`
#' @param sel a matrix of selectivity at age `[nsim x nage]`
#' @param Recdevs a matrix of recruitment deviations `[nsim x nyears]`
#' @param h a vector of steepness values of the Bev-Holt Stock-Recruitment relationship
#' @param mode the type of output requested from LSRA_opt (see \code{LSRA_opt})
#' @return the output of \code{LSRA_opt} for simulation \code{x} in the requested \code{mode}
#' @export LSRA2
#' @author T. Carruthers
LSRA2 <- function(x, lnR0s, FF, Chist, M, Mat_age, Wt_age, sel, Recdevs, h, mode = 2) {
  # Slice out simulation x from each input and delegate to the core routine
  LSRA_opt(lnR0s[x],
           FF_a = FF[x],
           Chist = Chist[x, ],
           M_a = M[x],
           Mat_age_a = Mat_age[x, ],
           Wt_age_a = Wt_age[x, ],
           sel_a = sel[x, ],
           Recdevs_a = Recdevs[x, ],
           h_a = h[x],
           mode = mode)
}
#' Internal estimation function for LSRA and LSRA2 functions
#'
#' @param param a numeric value representing log(R0)
#' @param FF_a numeric value, recent fishing mortality rate (apical F)
#' @param Chist a vector of historical catch observations `[nyears]`
#' @param M_a numeric value, natural mortality rate
#' @param Mat_age_a a vector of maturity at age `[nage]`
#' @param Wt_age_a a vector of weight at age `[nage]`
#' @param sel_a a vector of selectivity at age `[nage]`
#' @param Recdevs_a a vector of recruitment deviations `[nyears]`
#' @param h_a a numeric value of steepness of the Bev-Holt Stock-Recruitment relationship
#' @param Umax maximum harvest rate per year
#' @param mode 1-5, see Return
#' @return depends on mode: 1: objective function value, 2: trajectory of apical Fs,
#' 3: SSB depletion trajectory, 4: log(R0) as supplied, 5: SSB trajectory;
#' any other mode draws diagnostic plots
#' @export LSRA_opt
#' @author T. Carruthers
LSRA_opt <- function(param, FF_a, Chist, M_a, Mat_age_a, Wt_age_a, sel_a,
                     Recdevs_a, h_a, Umax = 0.5, mode = 1) {
  nyears <- length(Chist)
  maxage <- length(Mat_age_a)
  R0 <- exp(param)
  # Unfished equilibrium numbers-at-age
  N <- R0 * exp(-M_a * (0:(maxage - 1)))
  Nstore <- array(NA, c(nyears, maxage))
  SSB0 <- sum(N * Mat_age_a * Wt_age_a)
  SSBpR <- SSB0 / R0
  pen <- 0
  PredF <- SSB <- rep(NA, nyears)
  for (y in 1:nyears) {
    SSB[y] <- sum(N * Mat_age_a * Wt_age_a)
    midN <- N * exp(-M_a / 2)          # numbers after half the natural mortality
    vulnN <- midN * sel_a              # vulnerable numbers
    vulnW <- vulnN * Wt_age_a          # vulnerable biomass
    wfrac <- vulnW / sum(vulnW)        # catch-weight allocation across ages
    Cat <- Chist[y] * wfrac            # catch weight taken from each age class
    predU <- Cat / (midN * Wt_age_a)   # implied harvest rate at age
    over <- predU > Umax
    if (sum(over) > 0) {
      # Penalize and cap any age class fished above the maximum harvest rate
      pen <- pen + sum(abs(predU[over] - Umax)^2)
      Cat[over] <- Cat[over] / (predU[over] / Umax)
    }
    PredF[y] <- -log(1 - max(Cat / (N * Wt_age_a)))  # apical F this year
    N <- N * exp(-M_a - PredF[y] * sel_a)
    N[2:maxage] <- N[1:(maxage - 1)]                 # aging
    # Beverton-Holt recruitment scaled by this year's deviation
    N[1] <- Recdevs_a[y] * (0.8 * R0 * h_a * SSB[y]) /
      (0.2 * SSBpR * R0 * (1 - h_a) + (h_a - 0.2) * SSB[y])
    Nstore[y, ] <- N
  }
  # Mean predicted F over a recent window, compared against the target F
  mupredF <- mean(PredF[(nyears - 15):(nyears - 5)])
  if (mode == 1) {
    pen + (log(mupredF) - log(FF_a))^2
  } else if (mode == 2) {
    PredF
  } else if (mode == 3) {
    SSB / SSB0
  } else if (mode == 4) {
    param
  } else if (mode == 5) {
    SSB
  } else {
    # Diagnostic plots of the F trajectory and the catch history
    par(mfrow = c(2, 1))
    plot(PredF)
    abline(h = FF_a, col = "red")
    abline(h = mupredF, col = "blue")
    plot(Chist)
  }
}
|
library(dad)
### Name: rmrow.folder
### Title: Remove rows in all elements of a folder
### Aliases: rmrow.folder
### ** Examples
data(iris)
# Split iris by Species into a folder (a list of per-group data frames).
iris.fold <- as.folder(iris, "Species")
# Remove the rows named "1", "3", ..., "149" from every element of the folder.
rmrow.folder(iris.fold, as.character(seq(1, 150, by = 2)))
| /data/genthat_extracted_code/dad/examples/rmrowfolder.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 247 | r | library(dad)
### Name: rmrow.folder
### Title: Remove rows in all elements of a folder
### Aliases: rmrow.folder
### ** Examples
data(iris)
# Split iris by Species into a folder (a list of per-group data frames).
iris.fold <- as.folder(iris, "Species")
# Remove the rows named "1", "3", ..., "149" from every element of the folder.
rmrow.folder(iris.fold, as.character(seq(1, 150, by = 2)))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/organizations_operations.R
\name{organizations_cancel_handshake}
\alias{organizations_cancel_handshake}
\title{Cancels a handshake}
\usage{
organizations_cancel_handshake(HandshakeId)
}
\arguments{
\item{HandshakeId}{[required] The unique identifier (ID) of the handshake that you want to cancel. You
can get the ID from the ListHandshakesForOrganization operation.
The \href{http://wikipedia.org/wiki/regex}{regex pattern} for handshake ID
string requires \"h-\" followed by from 8 to 32 lowercase letters or
digits.}
}
\description{
Cancels a handshake. Canceling a handshake sets the handshake state to
\code{CANCELED}.
}
\details{
This operation can be called only from the account that originated the
handshake. The recipient of the handshake can\'t cancel it, but can use
DeclineHandshake instead. After a handshake is canceled, the recipient
can no longer respond to that handshake.
After you cancel a handshake, it continues to appear in the results of
relevant APIs for only 30 days. After that, it\'s deleted.
}
\section{Request syntax}{
\preformatted{svc$cancel_handshake(
HandshakeId = "string"
)
}
}
\examples{
\dontrun{
# Bill previously sent an invitation to Susan's account to join his
# organization. He changes his mind and decides to cancel the invitation
# before Susan accepts it. The following example shows Bill's
# cancellation:
#
svc$cancel_handshake(
HandshakeId = "h-examplehandshakeid111"
)
}
}
\keyword{internal}
| /paws/man/organizations_cancel_handshake.Rd | permissive | johnnytommy/paws | R | false | true | 1,529 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/organizations_operations.R
\name{organizations_cancel_handshake}
\alias{organizations_cancel_handshake}
\title{Cancels a handshake}
\usage{
organizations_cancel_handshake(HandshakeId)
}
\arguments{
\item{HandshakeId}{[required] The unique identifier (ID) of the handshake that you want to cancel. You
can get the ID from the ListHandshakesForOrganization operation.
The \href{http://wikipedia.org/wiki/regex}{regex pattern} for handshake ID
string requires \"h-\" followed by from 8 to 32 lowercase letters or
digits.}
}
\description{
Cancels a handshake. Canceling a handshake sets the handshake state to
\code{CANCELED}.
}
\details{
This operation can be called only from the account that originated the
handshake. The recipient of the handshake can\'t cancel it, but can use
DeclineHandshake instead. After a handshake is canceled, the recipient
can no longer respond to that handshake.
After you cancel a handshake, it continues to appear in the results of
relevant APIs for only 30 days. After that, it\'s deleted.
}
\section{Request syntax}{
\preformatted{svc$cancel_handshake(
HandshakeId = "string"
)
}
}
\examples{
\dontrun{
# Bill previously sent an invitation to Susan's account to join his
# organization. He changes his mind and decides to cancel the invitation
# before Susan accepts it. The following example shows Bill's
# cancellation:
#
svc$cancel_handshake(
HandshakeId = "h-examplehandshakeid111"
)
}
}
\keyword{internal}
|
library(caret)
library(randomForest)
# Shiny helper script: trains a Random Forest on mtcars and exposes a
# predictor for reactive use in the UI.
# Initializing data. The built-in 'mtcars' dataset is used.
data("mtcars")
# Capture str() output as text so the dataset structure can be shown in the UI.
dataStructure <- capture.output(str(mtcars))
# Fix the random generator seed so model training is reproducible.
set.seed(553311)
# Custom training control: 10-fold cross-validation.
customTrainControl <- trainControl(method = "cv", number = 10)
# Builds (trains) the Random Forest model on demand, so it can be
# regenerated whenever the user changes parameters in the UI.
# The model predicts 'mpg' (miles per gallon) from all other mtcars
# variables, using the shared cross-validated training control.
carsRandomForestModelBuilder <- function() {
  train(
    mpg ~ .,
    data = mtcars,
    method = "rf",
    trControl = customTrainControl
  )
}
# Predictor helper, invoked 'reactively' by the Shiny server.
# Delegates to the predict() generic for the supplied model object.
randomForestPredictor <- function(model, parameters) {
  predict(model, newdata = parameters)
}
| /DDP_Week4Assignment/modelBuilding.R | no_license | deysantanu84/coursera_data_science | R | false | false | 1,078 | r | library(caret)
library(randomForest)
# Shiny helper script: trains a Random Forest on mtcars and exposes a
# predictor for reactive use in the UI.
# Initializing data. The built-in 'mtcars' dataset is used.
data("mtcars")
# Capture str() output as text so the dataset structure can be shown in the UI.
dataStructure <- capture.output(str(mtcars))
# Fix the random generator seed so model training is reproducible.
set.seed(553311)
# Custom training control: 10-fold cross-validation.
customTrainControl <- trainControl(method = "cv", number = 10)
# Builds (trains) the Random Forest model on demand, so it can be
# regenerated whenever the user changes parameters in the UI.
# The model predicts 'mpg' (miles per gallon) from all other mtcars
# variables, using the shared cross-validated training control.
carsRandomForestModelBuilder <- function() {
  train(
    mpg ~ .,
    data = mtcars,
    method = "rf",
    trControl = customTrainControl
  )
}
# Predictor helper, invoked 'reactively' by the Shiny server.
# Delegates to the predict() generic for the supplied model object.
randomForestPredictor <- function(model, parameters) {
  predict(model, newdata = parameters)
}
|
# Rosalind AFRQ: Counting Disease Carriers.
# Input: frequencies of the homozygous recessive genotype (q^2 values).
# Output: for each value, the probability that an individual carries at least
# one recessive allele, 2pq + p^2, where p = sqrt(input) and q = 1 - p.
A <- scan('rosalind_afrq.txt', sep = ' ')
# Vectorized over all inputs: no need to grow the result element by element.
p <- sqrt(A)
q <- 1 - p
B <- 2 * p * q + p^2
cat(B)
| /Rosalind/Counting Disease Carriers.R | no_license | alecmchiu/CS124 | R | false | false | 165 | r | #Sex-Linked Inheritance
# Rosalind AFRQ (Counting Disease Carriers): for each homozygous-recessive
# genotype frequency, compute the carrier probability 2pq + p^2 with
# p = sqrt(input) and q = 1 - p, vectorized over the whole input.
A <- scan('rosalind_afrq.txt', sep = ' ')
p <- sqrt(A)
q <- 1 - p
B <- 2 * p * q + p^2
cat(B)
|
#' Sensitivity analysis for misclassification.
#'
#' Simple sensitivity analysis for misclassification.
#'
#' @param case Outcome variable. If a variable, this variable is tabulated against.
#' @param exposed Exposure variable.
#' @param implement Deprecated; please use type instead.
#' @param type Choice of misclassification:
#' \enumerate{
#' \item Exposure: bias analysis for exposure misclassification; corrections using
#' sensitivity and specificity: nondifferential and independent errors,
#' \item Outcome: bias analysis for outcome misclassification.
#' }
#' @param bias Vector defining the bias parameters. This vector has 4 elements
#' between 0 and 1, in the following order:
#' \enumerate{
#' \item Sensitivity of exposure (or outcome) classification among those with the
#' outcome,
#' \item Sensitivity of exposure (or outcome) classification among those without
#' the outcome,
#' \item Specificity of exposure (or outcome) classification among those with the
#' outcome, and
#' \item Specificity of exposure (or outcome) classification among those without
#' the outcome.
#' }
#' @param alpha Significance level.
#' @param dec Number of decimals in the printout.
#' @param print A logical scalar. Should the results be printed?
#'
#' @return A list with elements:
#' \item{obs.data}{The analysed 2 x 2 table from the observed data.}
#' \item{corr.data}{The expected observed data given the true data assuming
#' misclassification.}
#' \item{obs.measures}{A table of observed relative risk and odds ratio with
#' confidence intervals.}
#' \item{adj.measures}{A table of adjusted relative risk and odds ratio.}
#' \item{bias.parms}{Input bias parameters.}
#'
#' @references Lash, T.L., Fox, M.P, Fink, A.K., 2009 \emph{Applying Quantitative
#' Bias Analysis to Epidemiologic Data}, pp.79--108, Springer.
#'
#' @examples
#' # The data for this example come from:
#' # Fink, A.K., Lash,  T.L. A null association between smoking during pregnancy
#' # and breast cancer using Massachusetts registry data (United States).
#' # Cancer Causes Control 2003;14:497-503.
#' misclassification(matrix(c(215, 1449, 668, 4296),
#' dimnames = list(c("Breast cancer+", "Breast cancer-"),
#' c("Smoker+", "Smoker-")),
#' nrow = 2, byrow = TRUE),
#' type = "exposure",
#' bias = c(.78, .78, .99, .99))
#' misclassification(matrix(c(4558, 3428, 46305, 46085),
#' dimnames = list(c("AMI death+", "AMI death-"),
#' c("Male+", "Male-")),
#' nrow = 2, byrow = TRUE),
#' type = "outcome",
#' bias = c(.53, .53, .99, .99))
#' @export
#' @importFrom stats qnorm
misclassification <- function(case,
                              exposed,
                              implement = c("exposure", "outcome"),
                              type = c("exposure", "outcome"),
                              bias = NULL,
                              alpha = 0.05,
                              dec = 4,
                              print = TRUE){
    ## Support the deprecated `implement` argument by mapping it onto `type`.
    if (!missing(implement)) {
        warning("Argument implement is deprecated; please use type instead.",
                call. = FALSE)
        type <- implement
    }
    ## Default bias parameters (all 1) correspond to perfect classification.
    if (is.null(bias))
        bias <- c(1, 1, 1, 1)
    if (length(bias) != 4)
        stop('The argument bias should be made of the following components: (1) Sensitivity of exposure classification among those with the outcome, (2) Sensitivity of exposure classification among those without the outcome, (3) Specificity of exposure classification among those with the outcome, and (4) Specificity of exposure classification among those without the outcome.')
    if (!all(bias >= 0 & bias <= 1))
        stop('Bias parameters should be between 0 and 1.')
    ## Accept either a pre-tabulated 2x2 table/matrix or two raw variables.
    if (inherits(case, c("table", "matrix")))
        tab <- case
    else tab <- table(case, exposed)
    tab <- tab[1:2, 1:2]
    a <- tab[1, 1]
    b <- tab[1, 2]
    c <- tab[2, 1]
    d <- tab[2, 2]
    type <- match.arg(type)

    ## Observed measures of association: identical for both misclassification
    ## types, so computed once (previously duplicated in each branch).
    obs.rr <- (a/(a + c)) / (b/(b + d))
    se.log.obs.rr <- sqrt((c/a) / (a+c) + (d/b) / (b+d))
    lci.obs.rr <- exp(log(obs.rr) - qnorm(1 - alpha/2) * se.log.obs.rr)
    uci.obs.rr <- exp(log(obs.rr) + qnorm(1 - alpha/2) * se.log.obs.rr)
    obs.or <- (a/b) / (c/d)
    se.log.obs.or <- sqrt(1/a + 1/b + 1/c + 1/d)
    lci.obs.or <- exp(log(obs.or) - qnorm(1 - alpha/2) * se.log.obs.or)
    uci.obs.or <- exp(log(obs.or) + qnorm(1 - alpha/2) * se.log.obs.or)

    ## Back-calculate the corrected cells from the observed cells and the
    ## sensitivity/specificity bias parameters.
    if (type == "exposure") {
        ## Exposure misclassified: corrections applied within outcome rows.
        A <- (a - (1 - bias[3]) * (a + b)) / (bias[1] - (1 - bias[3]))
        C <- (c - (1 - bias[4]) * (c + d)) / (bias[2] - (1 - bias[4]))
        B <- (a + b) - A
        D <- (c + d) - C
        bias.lab <- "Outcome"   # Se/Sp are conditional on outcome status
    } else {
        ## Outcome misclassified: corrections applied within exposure columns.
        A <- (a - (1 - bias[3]) * (a + c)) / (bias[1] - (1 - bias[3]))
        B <- (b - (1 - bias[4]) * (b + d)) / (bias[2] - (1 - bias[4]))
        C <- (a + c) - A
        D <- (b + d) - B
        bias.lab <- "Exposure"  # Se/Sp are conditional on exposure status
    }
    if (A < 1 | B < 1 | C < 1 | D < 1)
        stop('Parameters chosen lead to negative cell(s) in adjusted 2x2 table.')
    corr.tab <- matrix(c(A, B, C, D), nrow = 2, byrow = TRUE)
    corr.rr <- (A/(A + C)) / (B/(B + D))
    corr.or <- (A/B) / (C/D)

    ## Give default dimnames when missing and carry them to the corrected
    ## table (the original re-tested is.null() right after assigning, which
    ## was dead code).
    if (is.null(rownames(tab)))
        rownames(tab) <- paste("Row", 1:2)
    if (is.null(colnames(tab)))
        colnames(tab) <- paste("Col", 1:2)
    rownames(corr.tab) <- rownames(tab)
    colnames(corr.tab) <- colnames(tab)

    rmat <- rbind(c(obs.rr, lci.obs.rr, uci.obs.rr), c(obs.or, lci.obs.or, uci.obs.or))
    rownames(rmat) <- c("           Observed Relative Risk:", "              Observed Odds Ratio:")
    colnames(rmat) <- c(" ", paste(100 * (1 - alpha), "% conf.",
                                   sep = ""), "interval")
    rmatc <- rbind(corr.rr, corr.or)
    rownames(rmatc) <- c("Corrected Relative Risk:",
                         "   Corrected Odds Ratio:")

    if (print) {
        cat("Observed Data:",
            "\n--------------",
            "\nOutcome   :", rownames(tab)[1],
            "\nComparing :", colnames(tab)[1], "vs.", colnames(tab)[2], "\n\n")
        print(round(tab, dec))
        cat("\nCorrected Data:",
            "\n--------------------\n\n")
        print(round(corr.tab, dec))
        cat("\n")
        cat("Observed Measures of Exposure-Outcome Relationship:",
            "\n-----------------------------------------------------------------------------------\n\n")
        print(round(rmat, dec))
        cat("Corrected Relative Risk:", round(corr.rr, dec),
            "\n   Corrected Odds Ratio:", round(corr.or, dec), "\n")
        cat("\nBias Parameters:",
            "\n----------------\n\n")
        cat(paste0("Se(", bias.lab, "+):"), bias[1],
            paste0("\nSe(", bias.lab, "-):"), bias[2],
            paste0("\nSp(", bias.lab, "+):"), bias[3],
            paste0("\nSp(", bias.lab, "-):"), bias[4],
            "\n")
    }
    invisible(list(obs.data = tab,
                   corr.data = corr.tab,
                   obs.measures = rmat,
                   adj.measures = rmatc,
                   bias.parms = bias))
}
| /episensr/R/misclassification.R | no_license | ingted/R-Examples | R | false | false | 10,469 | r | #' Sensitivity analysis for misclassification.
#'
#' Simple sensitivity analysis for misclassification.
#'
#' @param case Outcome variable. If a variable, this variable is tabulated against.
#' @param exposed Exposure variable.
#' @param implement Deprecated; please use type instead.
#' @param type Choice of misclassification:
#' \enumerate{
#' \item Exposure: bias analysis for exposure misclassification; corrections using
#' sensitivity and specificity: nondifferential and independent errors,
#' \item Outcome: bias analysis for outcome misclassification.
#' }
#' @param bias Vector defining the bias parameters. This vector has 4 elements
#' between 0 and 1, in the following order:
#' \enumerate{
#' \item Sensitivity of exposure (or outcome) classification among those with the
#' outcome,
#' \item Sensitivity of exposure (or outcome) classification among those without
#' the outcome,
#' \item Specificity of exposure (or outcome) classification among those with the
#' outcome,and
#' \item Specificity of exposure (or outcome) classification among those without
#' the outcome.
#' }
#' @param alpha Significance level.
#' @param dec Number of decimals in the printout.
#' @param print A logical scalar. Should the results be printed?
#'
#' @return A list with elements:
#' \item{obs.data}{The analysed 2 x 2 table from the observed data.}
#' \item{corr.data}{The expected observed data given the true data assuming
#' misclassfication.}
#' \item{obs.measures}{A table of observed relative risk and odds ratio with
#' confidence intervals.}
#' \item{adj.measures}{A table of adjusted relative risk and odds ratio.}
#' \item{bias.parms}{Input bias parameters.}
#'
#' @references Lash, T.L., Fox, M.P, Fink, A.K., 2009 \emph{Applying Quantitative
#' Bias Analysis to Epidemiologic Data}, pp.79--108, Springer.
#'
#' @examples
#' # The data for this example come from:
#' # Fink, A.K., Lash, T.L. A null association between smoking during pregnancy
#' # and breast cancer using Massachusetts registry data (United States).
#' # Cancer Causes Control 2003;14:497-503.
#' misclassification(matrix(c(215, 1449, 668, 4296),
#' dimnames = list(c("Breast cancer+", "Breast cancer-"),
#' c("Smoker+", "Smoker-")),
#' nrow = 2, byrow = TRUE),
#' type = "exposure",
#' bias = c(.78, .78, .99, .99))
#' misclassification(matrix(c(4558, 3428, 46305, 46085),
#' dimnames = list(c("AMI death+", "AMI death-"),
#' c("Male+", "Male-")),
#' nrow = 2, byrow = TRUE),
#' type = "outcome",
#' bias = c(.53, .53, .99, .99))
#' @export
#' @importFrom stats qnorm
misclassification <- function(case,
                              exposed,
                              implement = c("exposure", "outcome"),
                              type = c("exposure", "outcome"),
                              bias = NULL,
                              alpha = 0.05,
                              dec = 4,
                              print = TRUE){
  ## Simple (deterministic) bias analysis for misclassification of exposure
  ## or outcome in a 2x2 table: back-calculates the expected "true" table
  ## from the observed one using the supplied sensitivities/specificities,
  ## then reports observed vs. corrected relative risk and odds ratio.
  ## `implement` is kept only for backward compatibility; it overrides `type`.
  if (!missing(implement)) {
    warning("Argument implement is deprecated; please use type instead.",
            call. = FALSE)
    type <- implement
  }
  ## Default bias parameters of 1 (perfect classification) leave the data
  ## unchanged.  (The `else` branch is a deliberate no-op.)
  if(is.null(bias))
    bias <- c(1, 1, 1, 1)
  else bias <- bias
  if(length(bias) != 4)
    stop('The argument bias should be made of the following components: (1) Sensitivity of exposure classification among those with the outcome, (2) Sensitivity of exposure classification among those without the outcome, (3) Specificity of exposure classification among those with the outcome, and (4) Specificity of exposure classification among those without the outcome.')
  if(!all(bias >= 0 & bias <=1))
    stop('Bias parameters should be between 0 and 1.')
  ## Accept either a pre-built 2x2 table/matrix or two raw vectors to be
  ## cross-tabulated; only the first two rows/columns are used.
  if(inherits(case, c("table", "matrix")))
    tab <- case
  else tab <- table(case, exposed)
  tab <- tab[1:2, 1:2]
  ## Standard 2x2 cells: a/b = row 1 (e.g. outcome+) exposed/unexposed,
  ## c/d = row 2 (outcome-) exposed/unexposed.
  a <- tab[1, 1]
  b <- tab[1, 2]
  c <- tab[2, 1]
  d <- tab[2, 2]
  type <- match.arg(type)
  if (type == "exposure") {
    ## Observed relative risk with Wald confidence limits on the log scale.
    obs.rr <- (a/(a + c)) / (b/(b + d))
    se.log.obs.rr <- sqrt((c/a) / (a+c) + (d/b) / (b+d))
    lci.obs.rr <- exp(log(obs.rr) - qnorm(1 - alpha/2) * se.log.obs.rr)
    uci.obs.rr <- exp(log(obs.rr) + qnorm(1 - alpha/2) * se.log.obs.rr)
    ## Observed odds ratio with Wald confidence limits.
    obs.or <- (a/b) / (c/d)
    se.log.obs.or <- sqrt(1/a + 1/b + 1/c + 1/d)
    lci.obs.or <- exp(log(obs.or) - qnorm(1 - alpha/2) * se.log.obs.or)
    uci.obs.or <- exp(log(obs.or) + qnorm(1 - alpha/2) * se.log.obs.or)
    ## Back-calculate the expected true cells given
    ## bias = c(Se|outcome+, Se|outcome-, Sp|outcome+, Sp|outcome-);
    ## row totals are preserved (B and D are the complements).
    A <- (a - (1 - bias[3]) * (a + b)) / (bias[1] - (1 - bias[3]))
    C <- (c - (1 - bias[4]) * (c + d)) / (bias[2] - (1 - bias[4]))
    B <- (a + b) - A
    D <- (c + d) - C
    ## Reject parameter choices producing impossible (< 1) corrected cells.
    if(A < 1 | B < 1 | C < 1 | D < 1)
      stop('Parameters chosen lead to negative cell(s) in adjusted 2x2 table.')
    corr.tab <- matrix(c(A, B, C, D), nrow = 2, byrow = TRUE)
    corr.rr <- (A/(A + C)) / (B/(B + D))
    corr.or <- (A/B) / (C/D)
    ## Carry the observed table's dimnames over to the corrected table,
    ## falling back to generic labels when none are set.  (Note: after the
    ## first two checks, rownames/colnames of `tab` are never NULL, so the
    ## "else" branches below are the ones that actually run.)
    if (is.null(rownames(tab)))
      rownames(tab) <- paste("Row", 1:2)
    if (is.null(colnames(tab)))
      colnames(tab) <- paste("Col", 1:2)
    if (is.null(rownames(tab))){
      rownames(corr.tab) <- paste("Row", 1:2)
    } else {
      rownames(corr.tab) <- row.names(tab)
    }
    if (is.null(colnames(tab))){
      colnames(corr.tab) <- paste("Col", 1:2)
    } else {
      colnames(corr.tab) <- colnames(tab)
    }
    ## Console report (suppressed when print = FALSE).
    if (print)
      cat("Observed Data:",
          "\n--------------",
          "\nOutcome   :", rownames(tab)[1],
          "\nComparing :", colnames(tab)[1], "vs.", colnames(tab)[2], "\n\n")
    if (print)
      print(round(tab, dec))
    if (print)
      cat("\nCorrected Data:",
          "\n--------------------\n\n")
    if (print)
      print(round(corr.tab, dec))
    if (print)
      cat("\n")
    ## Observed measures with CI (rmat) and corrected point estimates (rmatc)
    ## are also returned invisibly at the end of the function.
    rmat <- rbind(c(obs.rr, lci.obs.rr, uci.obs.rr), c(obs.or, lci.obs.or, uci.obs.or))
    rownames(rmat) <- c("   Observed Relative Risk:", "      Observed Odds Ratio:")
    colnames(rmat) <- c(" ", paste(100 * (1 - alpha), "% conf.",
                                   sep = ""), "interval")
    rmatc <- rbind(corr.rr, corr.or)
    rownames(rmatc) <- c("Corrected Relative Risk:",
                         "   Corrected Odds Ratio:")
    if (print)
      cat("Observed Measures of Exposure-Outcome Relationship:",
          "\n-----------------------------------------------------------------------------------\n\n")
    if (print)
      print(round(rmat, dec))
    if (print)
      cat("Corrected Relative Risk:", round(corr.rr, dec), "\n   Corrected Odds Ratio:", round(corr.or, dec), "\n")
    if (print)
      cat("\nBias Parameters:",
          "\n----------------\n\n")
    if (print)
      cat("Se(Outcome+):", bias[1],
          "\nSe(Outcome-):", bias[2],
          "\nSp(Outcome+):", bias[3],
          "\nSp(Outcome-):", bias[4],
          "\n")
  }
  if (type == "outcome"){
    ## Same observed measures as above; only the correction formulas and the
    ## bias-parameter labels differ (classification of the outcome within
    ## exposure groups, so column totals are preserved instead of row totals).
    obs.rr <- (a/(a + c)) / (b/(b + d))
    se.log.obs.rr <- sqrt((c/a) / (a+c) + (d/b) / (b+d))
    lci.obs.rr <- exp(log(obs.rr) - qnorm(1 - alpha/2) * se.log.obs.rr)
    uci.obs.rr <- exp(log(obs.rr) + qnorm(1 - alpha/2) * se.log.obs.rr)
    obs.or <- (a/b) / (c/d)
    se.log.obs.or <- sqrt(1/a + 1/b + 1/c + 1/d)
    lci.obs.or <- exp(log(obs.or) - qnorm(1 - alpha/2) * se.log.obs.or)
    uci.obs.or <- exp(log(obs.or) + qnorm(1 - alpha/2) * se.log.obs.or)
    ## bias = c(Se|exposed, Se|unexposed, Sp|exposed, Sp|unexposed).
    A <- (a - (1 - bias[3]) * (a + c)) / (bias[1] - (1 - bias[3]))
    B <- (b - (1 - bias[4]) * (b + d)) / (bias[2] - (1 - bias[4]))
    C <- (a + c) - A
    D <- (b + d) - B
    if(A < 1 | B < 1 | C < 1 | D < 1)
      stop('Parameters chosen lead to negative cell(s) in adjusted 2x2 table.')
    corr.tab <- matrix(c(A, B, C, D), nrow = 2, byrow = TRUE)
    corr.rr <- (A/(A + C)) / (B/(B + D))
    corr.or <- (A/B) / (C/D)
    if (is.null(rownames(tab)))
      rownames(tab) <- paste("Row", 1:2)
    if (is.null(colnames(tab)))
      colnames(tab) <- paste("Col", 1:2)
    if (is.null(rownames(tab))){
      rownames(corr.tab) <- paste("Row", 1:2)
    } else {
      rownames(corr.tab) <- row.names(tab)
    }
    if (is.null(colnames(tab))){
      colnames(corr.tab) <- paste("Col", 1:2)
    } else {
      colnames(corr.tab) <- colnames(tab)
    }
    if (print)
      cat("Observed Data:",
          "\n--------------",
          "\nOutcome   :", rownames(tab)[1],
          "\nComparing :", colnames(tab)[1], "vs.", colnames(tab)[2], "\n\n")
    if (print)
      print(round(tab, dec))
    if (print)
      cat("\nCorrected Data:",
          "\n--------------------\n\n")
    if (print)
      print(round(corr.tab, dec))
    if (print)
      cat("\n")
    rmat <- rbind(c(obs.rr, lci.obs.rr, uci.obs.rr), c(obs.or, lci.obs.or, uci.obs.or))
    rownames(rmat) <- c("   Observed Relative Risk:", "      Observed Odds Ratio:")
    colnames(rmat) <- c(" ", paste(100 * (1 - alpha), "% conf.",
                                   sep = ""), "interval")
    rmatc <- rbind(corr.rr, corr.or)
    rownames(rmatc) <- c("Corrected Relative Risk:",
                         "   Corrected Odds Ratio:")
    if (print)
      cat("Observed Measures of Exposure-Outcome Relationship:",
          "\n-----------------------------------------------------------------------------------\n\n")
    if (print)
      print(round(rmat, dec))
    if (print)
      cat("Corrected Relative Risk:", round(corr.rr, dec), "\n   Corrected Odds Ratio:", round(corr.or, dec), "\n")
    if (print)
      cat("\nBias Parameters:",
          "\n----------------\n\n")
    if (print)
      cat("Se(Exposure+):", bias[1],
          "\nSe(Exposure-):", bias[2],
          "\nSp(Exposure+):", bias[3],
          "\nSp(Exposure-):", bias[4],
          "\n")
  }
  ## Return everything invisibly so the printed report is the primary output.
  invisible(list(obs.data = tab,
                 corr.data = corr.tab,
                 obs.measures = rmat,
                 adj.measures = rmatc,
                 bias.parms = bias))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/esize_m.R
\name{esize_m}
\alias{esize_m}
\title{Effective Size}
\usage{
esize_m(lineup_table, k, both = FALSE)
}
\arguments{
\item{lineup_table}{A table of lineup choices}
\item{k}{Number of members in lineup. Must be specified by user (scalar).}
\item{both}{Defaults to FALSE. Returns Tredoux's adjusted effective size estimate.
If TRUE, provides both Malpass's (1981) and Malpass's adjusted (see: Tredoux, 1998)
calculations of effective size.}
}
\value{
Malpass's original & adjusted estimates of effective size
}
\description{
Function for computing Effective Size
}
\details{
Reduces the size of a lineup from a (corrected) nominal starting
value by the degree to which members are, in sum, chosen below
the level of chance expectation.
}
\examples{
#Data:
lineup_vec <- round(runif(100, 1, 6))
#Call:
esize_m(lineup_vec, 6, both = TRUE)
esize_m(lineup_vec, 6)
}
\references{
Malpass, R. S. (1981). Effective size and defendant bias in
eyewitness identification lineups. \emph{Law and Human Behavior, 5}(4), 299-309.
Malpass, R. S., Tredoux, C., & McQuiston-Surrett, D. (2007). Lineup
construction and lineup fairness. In R. Lindsay, D. F. Ross, J. D. Read,
& M. P. Toglia (Eds.), \emph{Handbook of Eyewitness Psychology, Vol. 2: Memory for
people} (pp. 155-178). Mahwah, NJ: Lawrence Erlbaum Associates.
Tredoux, C. G. (1998). Statistical inference on measures of lineup fairness.
\emph{Law and Human Behavior, 22}(2), 217-237.
Tredoux, C. (1999). Statistical considerations when determining measures of
lineup size and lineup bias. \emph{Applied Cognitive Psychology}, 13, S9-S26.
Wells, G. L.,Leippe, M. R., & Ostrom, T. M. (1979). Guidelines for
empirically assessing the fairness of a lineup. \emph{Law and Human Behavior,
3}(4), 285-293.
}
| /man/esize_m.Rd | no_license | tmnaylor/r4lineups | R | false | true | 2,020 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/esize_m.R
\name{esize_m}
\alias{esize_m}
\title{Effective Size}
\usage{
esize_m(lineup_table, k, both = FALSE)
}
\arguments{
\item{lineup_table}{A table of lineup choices}
\item{k}{Number of members in lineup. Must be specified by user (scalar).}
\item{both}{Defaults to FALSE. Returns Tredoux's adjusted effective size estimate.
If TRUE, provides both Malpass's (1981) and Malpass's adjusted (see: Tredoux, 1998)
calculations of effective size.}
}
\value{
Malpass's original & adjusted estimates of effective size
}
\description{
Function for computing Effective Size
}
\details{
Reduces the size of a lineup from a (corrected) nominal starting
value by the degree to which members are, in sum, chosen below
the level of chance expectation.
}
\examples{
#Data:
lineup_vec <- round(runif(100, 1, 6))
#Call:
esize_m(lineup_vec, 6, both = TRUE)
esize_m(lineup_vec, 6)
}
\references{
Malpass, R. S. (1981). Effective size and defendant bias in
eyewitness identification lineups. \emph{Law and Human Behavior, 5}(4), 299-309.
Malpass, R. S., Tredoux, C., & McQuiston-Surrett, D. (2007). Lineup
construction and lineup fairness. In R. Lindsay, D. F. Ross, J. D. Read,
& M. P. Toglia (Eds.), \emph{Handbook of Eyewitness Psychology, Vol. 2: Memory for
people} (pp. 155-178). Mahwah, NJ: Lawrence Erlbaum Associates.
Tredoux, C. G. (1998). Statistical inference on measures of lineup fairness.
\emph{Law and Human Behavior, 22}(2), 217-237.
Tredoux, C. (1999). Statistical considerations when determining measures of
lineup size and lineup bias. \emph{Applied Cognitive Psychology}, 13, S9-S26.
Wells, G. L.,Leippe, M. R., & Ostrom, T. M. (1979). Guidelines for
empirically assessing the fairness of a lineup. \emph{Law and Human Behavior,
3}(4), 285-293.
}
|
# Advance to the next SDA component: bump the "SDAVn" text input by one; the
# input$SDAVn observer then syncs envv$QC_compIter to the new value.
observeEvent(input$nextSDA_br2, {
  SDAorder <- 1:as.numeric(envv$SDAres$command_arguments$num_comps)
  # BUG FIX: `%in%` binds tighter than `+`, so the original condition
  # `envv$QC_compIter + 1 %in% SDAorder` parsed as
  # `envv$QC_compIter + (1 %in% SDAorder)` and was almost always truthy,
  # allowing "next" to step past the last component (yielding an NA index).
  # Parenthesise so the guard tests the *incremented* index.
  if((envv$QC_compIter + 1) %in% SDAorder){
    updateTextInput(session, "SDAVn", value = SDAorder[which(SDAorder==envv$QC_compIter) + 1])
  }
})
# Step back to the previous SDA component: decrement the "SDAVn" text input;
# the input$SDAVn observer then syncs envv$QC_compIter.
observeEvent(input$prevSDA_br2, {
  SDAorder <- 1:as.numeric(envv$SDAres$command_arguments$num_comps)
  # BUG FIX (same precedence defect as the "next" handler): `%in%` binds
  # tighter than `-`, so the original parsed as
  # `envv$QC_compIter - (1 %in% SDAorder)`.  That happened to block only
  # QC_compIter == 1 (the intended case), but only by coincidence; the
  # parenthesised form states the intent and is robust.
  if((envv$QC_compIter - 1) %in% SDAorder){
    updateTextInput(session, "SDAVn", value = SDAorder[which(SDAorder==envv$QC_compIter) - 1])
  }
})
# Validate manual edits to the "SDAVn" component text input: adopt the typed
# value when it is a valid component index, otherwise reset the input back to
# the currently selected component (envv$QC_compIter).
observeEvent(input$SDAVn, {
  if(!is.null(envv$QC_compIter)){
    SDAorder <- 1:as.numeric(envv$SDAres$command_arguments$num_comps)
    # BUG FIX: the original used the vectorised `|`; when input$SDAVn was
    # NULL the right-hand side evaluated to logical(0), making the whole
    # condition logical(0) and crashing if() with "argument is of length
    # zero".  `||` short-circuits on the NULL test before as.numeric() is
    # ever evaluated.  Non-numeric text becomes NA, NA %in% SDAorder is
    # FALSE, so bad input also falls into the reset branch.
    if(is.null(input$SDAVn) || (!(as.numeric(input$SDAVn) %in% SDAorder))) {
      print("Its null")
      updateTextInput(session, "SDAVn", value = envv$QC_compIter)
    } else {
      envv$QC_compIter = as.numeric(input$SDAVn)
    }
  }
})
# Copy the names of the top positively-loaded genes of the currently selected
# SDA component to the system clipboard.
observeEvent(input$C2Cpos, {
  n_genes <- as.numeric(input$NoOfGenes)
  gene_df <- as.data.frame(
    print_gene_list(results = envv$SDAres, as.numeric(envv$QC_compIter),
                    PosOnly = T)
  )
  pos_genes <- head(gene_df, n_genes)$Gene.Name
  clipr::write_clip(pos_genes)
})
# Copy the names of the top negatively-loaded genes of the currently selected
# SDA component to the system clipboard.
observeEvent(input$C2Cneg, {
  n_genes <- as.numeric(input$NoOfGenes)
  gene_df <- as.data.frame(
    print_gene_list(results = envv$SDAres, as.numeric(envv$QC_compIter),
                    NegOnly = T)
  )
  neg_genes <- head(gene_df, n_genes)$Gene.Name
  clipr::write_clip(neg_genes)
}) | /inst/app/app_OE_BatchRemoved.R | no_license | bimberlabinternal/ShinySDA | R | false | false | 3,098 | r | observeEvent(input$nextSDA_br2, {
  # (Body of the duplicated nextSDA_br2 handler; its opening line sits inside
  # the preceding metadata row of this dump.)
  SDAorder <- 1:as.numeric(envv$SDAres$command_arguments$num_comps)
  # NOTE(review): `%in%` binds tighter than `+`, so this condition parses as
  # `envv$QC_compIter + (1 %in% SDAorder)` and is almost always truthy; the
  # intended guard is `(envv$QC_compIter + 1) %in% SDAorder` -- confirm.
  if(envv$QC_compIter + 1 %in% SDAorder){
    # envv$QC_compIter = SDAorder[which(SDAorder==envv$QC_compIter) + 1]
    # updateTextInput(session, "SDAVn", value = envv$QC_compIter)
    updateTextInput(session, "SDAVn", value = SDAorder[which(SDAorder==envv$QC_compIter) + 1])
  }
})
# Step back to the previous SDA component by decrementing the "SDAVn" input;
# the input$SDAVn observer then syncs envv$QC_compIter.
observeEvent(input$prevSDA_br2, {
  # choice <- 1:as.numeric(envv$SDAres$command_arguments$num_comps) #paste0("SDA", 1:as.numeric(envv$SDAres$command_arguments$num_comps)) # envv$QC_components
  # SDAorder <- setdiff(choice, envv$Remove_comps)
  SDAorder <- 1:as.numeric(envv$SDAres$command_arguments$num_comps)
  # NOTE(review): `%in%` binds tighter than `-`, so this parses as
  # `envv$QC_compIter - (1 %in% SDAorder)`; it happens to block only when
  # QC_compIter == 1, which matches the intent, but parenthesising as
  # `(envv$QC_compIter - 1) %in% SDAorder` would make that explicit.
  if(envv$QC_compIter - 1 %in% SDAorder){
    #envv$QC_compIter = SDAorder[which(SDAorder==envv$QC_compIter) - 1]
    # updateTextInput(session, "SDAVn", value = envv$QC_compIter)
    updateTextInput(session, "SDAVn", value = SDAorder[which(SDAorder==envv$QC_compIter) - 1])
  }
})
# Validate manual edits to the "SDAVn" component text input: adopt the typed
# value when it is a valid component index, otherwise reset the input back to
# the currently selected component (envv$QC_compIter).
observeEvent(input$SDAVn, {
  if(!is.null(envv$QC_compIter)){
    SDAorder <- 1:as.numeric(envv$SDAres$command_arguments$num_comps)
    # BUG FIX: the original used the vectorised `|`; when input$SDAVn was
    # NULL the right-hand side evaluated to logical(0), making the whole
    # condition logical(0) and crashing if() with "argument is of length
    # zero".  `||` short-circuits on the NULL test before as.numeric() is
    # ever evaluated.  Non-numeric text becomes NA, NA %in% SDAorder is
    # FALSE, so bad input also falls into the reset branch.
    if(is.null(input$SDAVn) || (!(as.numeric(input$SDAVn) %in% SDAorder))) {
      print("Its null")
      updateTextInput(session, "SDAVn", value = envv$QC_compIter)
    } else {
      envv$QC_compIter = as.numeric(input$SDAVn)
    }
  }
})
# Copy the top positively-loaded genes of the current SDA component to the
# clipboard, truncated to input$NoOfGenes entries.
observeEvent(input$C2Cpos, {
  Out1 <- print_gene_list(results=envv$SDAres, as.numeric(envv$QC_compIter), PosOnly = T) %>%
    #group_by(package) %>%
    #tally() %>%
    #arrange(desc(n), tolower(package)) %>%
    #mutate(percentage = n / nrow(pkgData()) * 100) %>%
    #select("Package name" = package, "% of downloads" = percentage) %>%
    as.data.frame() %>%
    head(as.numeric(input$NoOfGenes))
  Out1 <- Out1$Gene.Name
  # print(Out1)
  clipr::write_clip(Out1)
})
# Copy the top negatively-loaded genes of the current SDA component to the
# clipboard, truncated to input$NoOfGenes entries.
observeEvent(input$C2Cneg, {
  Out2 <- print_gene_list(results=envv$SDAres, as.numeric(envv$QC_compIter), NegOnly = T) %>%
    #group_by(package) %>%
    #tally() %>%
    #arrange(desc(n), tolower(package)) %>%
    #mutate(percentage = n / nrow(pkgData()) * 100) %>%
    #select("Package name" = package, "% of downloads" = percentage) %>%
    as.data.frame() %>%
    head(as.numeric(input$NoOfGenes))
  Out2 <- Out2$Gene.Name
  # print(Out1)
  clipr::write_clip(Out2)
}) |
## Compute normalised ratio indices, NRI = (R_b1 - R_b2) / (R_b1 + R_b2),
## for a Speclib, either for every possible band combination
## (recursive = TRUE, delegated to the Fortran routine "recursive_nri")
## or for the band pair(s) given in b1/b2.
##
## Args:
##   x:            Speclib object (hsdar).
##   b1, b2:       band identifiers; wavelengths when bywavelength = TRUE,
##                 otherwise column indices.  May be vectors of equal length.
##   recursive:    if TRUE, all band combinations are computed and an object
##                 of class "Nri" is returned; b1/b2 are then ignored.
##   bywavelength: interpret b1/b2 as wavelengths (TRUE) or indices (FALSE).
##
## Returns: an "Nri" object (recursive case), a matrix with one column per
## band pair (vector input), or a vector/data.frame of NRI values (one pair).
nri <- function(
                x,
                b1,
                b2,
                recursive = FALSE,
                bywavelength = TRUE
               )
{
  if (!is.speclib(x))
    stop("x must be of class 'Speclib'")

  range.of.wavelength <- x$fwhm
  reflectance <- spectra(x)
  wavelength <- wavelength(x)

  if (recursive)
  {
    ## Total result length: n_spectra * n*(n-1)/2 band pairs.
    nri_length <- nrow(reflectance) *
      (sum(1:length(wavelength)) - length(wavelength))
    ## BUG FIX: the original guard wrapped this arithmetic in
    ## inherits(..., "error"), which can never be TRUE (multiplication does
    ## not return a condition object), so oversized requests were never
    ## caught.  Compare against the vector-length limit directly instead.
    if (nri_length > 2^31 - 1)
    {
      stop("Number of Samples*(number of wavelengths^2) exceeds maximum
vector size of 2^31-1")
    }
    nri_dat <- single(length = nri_length)
    result <- .Fortran("recursive_nri",
                       nwl = as.integer(length(wavelength)),
                       nspec = as.integer(nrow(reflectance)),
                       reflectance = as.single(as.matrix(reflectance)),
                       nri = nri_dat,
                       nri_length = as.integer(nri_length)#,
#                        PACKAGE = "hsdar"
                      )
    ## Reshape the flat Fortran output into a 3-D distance-matrix structure
    ## (band x band x sample) and wrap it in an "Nri" S4 object.
    result <- distMat3D(as.numeric(result$nri), length(wavelength),
                        nrow(reflectance))
    result <- new("Nri", nri = result, fwhm = range.of.wavelength,
                  wavelength = wavelength,
                  dimnames = list(Band_1 = paste("B_", wavelength, sep = ""),
                                  Band_2 = paste("B_", wavelength, sep = ""),
                                  Sample = idSpeclib(x)),
                  attributes = attribute(x)
                 )
    ## Preserve caret tuning information and record the processing step.
    if (!is.null(attr(x, "caretParameters")))
      attr(result, "caretParameters") <- attr(x, "caretParameters")
    result@usagehistory <- c(x@usagehistory, "NRI values calculated")
  } else {
    b1 <- as.vector(unlist(b1))
    b2 <- as.vector(unlist(b2))
    stopifnot(length(b1) == length(b2))
    if (length(b1) > 1)
    {
      ## Several band pairs: recurse per pair, one NRI column per pair.
      res <- apply(matrix(seq_along(b1), ncol = 1), 1,
                   FUN = function(i, x, b1, b2, bywavelength)
                   {
                     index <- nri(x, b1 = b1[i], b2 = b2[i],
                                  bywavelength = bywavelength)
                     return(index)
                   }, x, b1, b2, bywavelength)
      colnames(res) <- paste("B", b1, "B", b2, sep = "_")
      rownames(res) <- idSpeclib(x)
      return(res)
    }
    ## Resolve the two bands to column positions of the reflectance matrix.
    if (bywavelength)
    {
      posb1 <- which(wavelength == b1)
      posb2 <- which(wavelength == b2)
    } else {
      posb1 <- b1
      posb2 <- b2
    }
    result <- (reflectance[, posb1] - reflectance[, posb2]) /
      (reflectance[, posb1] + reflectance[, posb2])
    ## FIX: class(result) == "data.frame" compares against a possibly
    ## multi-element class vector; inherits() is the reliable test.
    if (inherits(result, "data.frame"))
      names(result) <- "NRI"
  }
  return(result)
}
| /hsdar/R/nri.R | no_license | ingted/R-Examples | R | false | false | 2,649 | r | nri <- function(
x,
b1,
b2,
recursive = FALSE,
bywavelength = TRUE
)
{
if (!is.speclib(x))
stop("x must be of class 'Speclib'")
range.of.wavelength <- x$fwhm
reflectance <- spectra(x)
wavelength <- wavelength(x)
if (recursive)
{
if (inherits(nrow(reflectance) * (sum(1:length(wavelength))-length(wavelength)), "error"))
{
stop("Number of Samples*(number of wavelengths^2) exceeds maximum
vector size of 2^31-1")
}
nri_dat <- single(length = nrow(reflectance) * (sum(1:length(wavelength))-length(wavelength)))
result <- .Fortran("recursive_nri",
nwl = as.integer(length(wavelength)),
nspec = as.integer(nrow(reflectance)),
reflectance = as.single(as.matrix(reflectance)),
nri = nri_dat,
nri_length = as.integer(nrow(reflectance) *
(sum(1:length(wavelength))-length(wavelength)))#,
# PACKAGE = "hsdar"
)
result <- distMat3D(as.numeric(result$nri), length(wavelength), nrow(reflectance))
result <- new("Nri", nri = result, fwhm = range.of.wavelength,
wavelength = wavelength,
dimnames = list(Band_1 = paste("B_", wavelength, sep = ""),
Band_2 = paste("B_", wavelength, sep = ""),
Sample = idSpeclib(x)),
attributes = attribute(x)
)
if (!is.null(attr(x, "caretParameters")))
attr(result, "caretParameters") <- attr(x, "caretParameters")
result@usagehistory <- c(x@usagehistory, "NRI values calculated")
} else {
b1 <- as.vector(unlist(b1))
b2 <- as.vector(unlist(b2))
stopifnot(length(b1) == length(b2))
if (length(b1) > 1)
{
res <- apply(matrix(1:length(b1), ncol = 1), 1,
FUN = function(i, x, b1, b2, bywavelength)
{
index <- nri(x, b1 = b1[i], b2 = b2[i], bywavelength = bywavelength)
return(index)
}, x, b1, b2, bywavelength)
colnames(res) <- paste("B", b1, "B", b2, sep = "_")
rownames(res) <- idSpeclib(x)
return(res)
}
if (bywavelength)
{
posb1 <- which(wavelength==b1)
posb2 <- which(wavelength==b2)
} else {
posb1 <- b1
posb2 <- b2
}
result <- (reflectance[,posb1]-reflectance[,posb2])/(reflectance[,posb1]+reflectance[,posb2])
if (class(result)=="data.frame")
names(result)<-"NRI"
}
return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lidar_analysis.R
\name{wbt_zlidar_to_las}
\alias{wbt_zlidar_to_las}
\title{zlidar to las}
\usage{
wbt_zlidar_to_las(
inputs = NULL,
outdir = NULL,
wd = NULL,
verbose_mode = FALSE,
compress_rasters = FALSE
)
}
\arguments{
\item{inputs}{Input zlidar files.}
\item{outdir}{Output directory into which zlidar files are created. If unspecified, it is assumed to be the same as the inputs.}
\item{wd}{Changes the working directory.}
\item{verbose_mode}{Sets verbose mode. If verbose mode is False, tools will not print output messages.}
\item{compress_rasters}{Sets the flag used by WhiteboxTools to determine whether to use compression for output rasters.}
}
\value{
Returns the tool text outputs.
}
\description{
Converts one or more zlidar files into the LAS data format.
}
| /man/wbt_zlidar_to_las.Rd | permissive | bkielstr/whiteboxR | R | false | true | 863 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lidar_analysis.R
\name{wbt_zlidar_to_las}
\alias{wbt_zlidar_to_las}
\title{zlidar to las}
\usage{
wbt_zlidar_to_las(
inputs = NULL,
outdir = NULL,
wd = NULL,
verbose_mode = FALSE,
compress_rasters = FALSE
)
}
\arguments{
\item{inputs}{Input zlidar files.}
\item{outdir}{Output directory into which zlidar files are created. If unspecified, it is assumed to be the same as the inputs.}
\item{wd}{Changes the working directory.}
\item{verbose_mode}{Sets verbose mode. If verbose mode is False, tools will not print output messages.}
\item{compress_rasters}{Sets the flag used by WhiteboxTools to determine whether to use compression for output rasters.}
}
\value{
Returns the tool text outputs.
}
\description{
Converts one or more zlidar files into the LAS data format.
}
|
library(tidyverse)
library(lubridate)
# link: https://direito.consudata.com.br/jurimetria/dplyr/
# dplyr walk-through on Brazilian court data.  The data frames `detalhes`
# (case details) and `informacoes` (case information) are assumed to be
# loaded in the session before this script runs.
### Row operations
## count: frequency tables, optionally sorted by frequency
relator <- detalhes %>%
  count(relator_atual)
relator <- detalhes %>%
  count(relator_atual, sort = TRUE)
relator_classe <- detalhes %>%
  count(relator_atual, classe, sort = TRUE)
## filter: keep rows matching logical conditions
ai <- detalhes %>%
  filter(classe == "AI")
ai_presidente <- detalhes %>%
  filter(classe == "AI" , relator_atual == "MINISTRO PRESIDENTE")
ai_hc <- detalhes %>%
  filter(classe == "AI" | classe == "HC")
na <- detalhes %>%
  filter(numero_unico != "0004754-72.2008.0.01.0000" | is.na(numero_unico))
# %in% compares against a vector of values, including vectors taken from
# other tables.
classes <- c("AI","HC")
ai_hc <- detalhes %>%
  filter(classe %in% classes) # semi_join() is an alternative to %in%.
### Column operations
## select: by name, position, range, exclusion or tidyselect helper
incidente <- informacoes %>%
  select(incidente)
incidente_origem <- informacoes %>%
  select(incidente, origem)
incidente <- informacoes %>%
  select(1)
info3 <- informacoes %>%
  select(1:3)
info4 <- informacoes %>%
  select(2, 6)
info_3 <- informacoes %>%
  select(-3)
info_4_6 <- informacoes %>%
  select(-c(4,6))
info_assunto <- informacoes %>%
  select(assunto1:assunto3)
assunto <- informacoes %>%
  select(starts_with("assunto"))
origem <- informacoes %>%
  select(ends_with("origem"))
origem <- informacoes %>%
  select(contains("origem"))
assunto <- informacoes %>%
  select(matches("\\d")) # selects every column whose name contains a digit
assuntos <- informacoes %>%
  select(assunto = assunto1) # select and rename in one step
### arrange: sort rows (ascending by default, desc() to reverse)
detalhes <- detalhes %>%
  arrange(relator_atual)
detalhes <- detalhes %>%
  arrange(desc(relator_atual))
### mutate
#### creating new columns
informacoes <- informacoes %>%
  mutate(ano_protocolo = year(data_protocolo))
# assigning NULL inside mutate() drops the column again
informacoes <- informacoes %>%
  mutate(ano_protocolo = NULL)
# .after controls where the new column is placed
informacoes <- informacoes %>%
  mutate(ano_protocolo = year(data_protocolo), .after = data_protocolo)
informacoes <- informacoes %>%
  mutate(ano_protocolo = year(data_protocolo),
         mes_protocolo = month(data_protocolo),
         dia_protocolo = wday(data_protocolo, label = TRUE, abbr=FALSE))
### across: apply a function over several columns at once
## by position (columns 2 to 4)
informacoes_AT <- informacoes %>%
  mutate(across(2:4, .fns = ~str_remove(.x, "DIREITO"))) # removes the word "DIREITO" from the values
## over every column
informacoes_ALL <- informacoes %>%
  mutate(across(.fns = as.character))
## only where a predicate holds (numeric columns here)
informacoes_IF <- informacoes %>%
  mutate(across(where(is.numeric), as.character))
### summarize: collapse a table to one row of summary statistics
df <- tibble(idade = sample(18:50,30,replace = TRUE))
sumario <- df %>%
  summarize(media = mean(idade),
            mediana= median(idade),
            desvio_padrao = sd(idade),
            minimo = min(idade),
            maximo = max(idade)
            )
### group_by: per-group summaries
View(starwars)
sumario<- starwars %>%
  group_by(sex) %>%
  summarize(peso = mean(mass,na.rm = TRUE),
            altura = mean(height, na.rm = TRUE))
| /R/dplyr.R | permissive | saulofender/R_PostgreSQL | R | false | false | 3,236 | r | library(tidyverse)
library(lubridate)
# link: https://direito.consudata.com.br/jurimetria/dplyr/
### Operacoes em linhas
## count
relator <- detalhes %>%
count(relator_atual)
relator <- detalhes %>%
count(relator_atual, sort = TRUE)
relator_classe <- detalhes %>%
count(relator_atual, classe, sort = TRUE)
## filter
ai <- detalhes %>%
filter(classe == "AI")
ai_presidente <- detalhes %>%
filter(classe == "AI" , relator_atual == "MINISTRO PRESIDENTE")
ai_hc <- detalhes %>%
filter(classe == "AI" | classe == "HC")
na <- detalhes %>%
filter(numero_unico != "0004754-72.2008.0.01.0000" | is.na(numero_unico))
# O %in% serve para comparacao de vetores, inclusive de tabelas diferentes.
classes <- c("AI","HC")
ai_hc <- detalhes %>%
filter(classe %in% classes) # O semi_join() e uma opcao ao inves do %in%.
### Operacoes em colunas
## Selecionar
incidente <- informacoes %>%
select(incidente)
incidente_origem <- informacoes %>%
select(incidente, origem)
incidente <- informacoes %>%
select(1)
info3 <- informacoes %>%
select(1:3)
info4 <- informacoes %>%
select(2, 6)
info_3 <- informacoes %>%
select(-3)
info_4_6 <- informacoes %>%
select(-c(4,6))
info_assunto <- informacoes %>%
select(assunto1:assunto3)
assunto <- informacoes %>%
select(starts_with("assunto"))
origem <- informacoes %>%
select(ends_with("origem"))
origem <- informacoes %>%
select(contains("origem"))
assunto <- informacoes %>%
select(matches("\\d")) #seleciona todas as infomarcoes que tem numero na coluna
assuntos <- informacoes %>%
select(assunto = assunto1)
### arrange
detalhes <- detalhes %>%
arrange(relator_atual)
detalhes <- detalhes %>%
arrange(desc(relator_atual))
### mutate
#### criando novas colunas
informacoes <- informacoes %>%
mutate(ano_protocolo = year(data_protocolo))
informacoes <- informacoes %>%
mutate(ano_protocolo = NULL)
informacoes <- informacoes %>%
mutate(ano_protocolo = year(data_protocolo), .after = data_protocolo)
informacoes <- informacoes %>%
mutate(ano_protocolo = year(data_protocolo),
mes_protocolo = month(data_protocolo),
dia_protocolo = wday(data_protocolo, label = TRUE, abbr=FALSE))
### across
## AT
informacoes_AT <- informacoes %>%
mutate(across(2:4, .fns = ~str_remove(.x, "DIREITO"))) #remove a palavra das linhas
## ALL
informacoes_ALL <- informacoes %>%
mutate(across(.fns = as.character))
## IF
informacoes_IF <- informacoes %>%
mutate(across(where(is.numeric), as.character))
### Summarize
df <- tibble(idade = sample(18:50,30,replace = TRUE))
sumario <- df %>%
summarize(media = mean(idade),
mediana= median(idade),
desvio_padrao = sd(idade),
minimo = min(idade),
maximo = max(idade)
)
### Group by
View(starwars)
sumario<- starwars %>%
group_by(sex) %>%
summarize(peso = mean(mass,na.rm = TRUE),
altura = mean(height, na.rm = TRUE))
|
# Monthly vote-up analysis for mid-reputation users (those inside the
# interquartile reputation range) of the BIO Stack Exchange community.
# Inputs: a per-user participation CSV and a per-answer CSV (';'-separated).
dadosParticipacaoGeral = read.csv("C:\\Users\\thiag_000\\Desktop\\analises2\\participacaoComunidades.csv", header = TRUE, sep = ';')
dadosRespostas = read.csv("C:\\Users\\thiag_000\\Desktop\\analises2\\respostasComunidades.csv", header = TRUE, sep = ';')
# Alternative (Linux) input paths, kept for reference:
#dadosParticipacaoGeral = read.csv("/home/thiagoprocaci/analises/participacaoComunidades.csv", header = TRUE, sep = ';')
#dadosRespostas = read.csv("/home/thiagoprocaci/analises/respostasComunidades.csv", header = TRUE, sep = ';')
dadosBio = subset(dadosParticipacaoGeral, dadosParticipacaoGeral$comunidade == 'BIO')
# summary() returns (Min, 1st Qu., Median, Mean, 3rd Qu., Max); positions
# 2 and 5 are the first and third quartiles.
resumo <- summary(dadosBio$reputacao)
primeiroQuartil <- resumo[2]
terceiroQuartil <- resumo[5]
# BIO users whose reputation lies inside the interquartile range.
dadosComunidadeTop <- subset(dadosParticipacaoGeral, dadosParticipacaoGeral$comunidade == 'BIO' & dadosParticipacaoGeral$reputacao >= primeiroQuartil & dadosParticipacaoGeral$reputacao <= terceiroQuartil)
usuarioList <- unique(dadosComunidadeTop$idUsuario)
top50DadosRespostas <- subset(dadosRespostas, dadosRespostas$idUsuario %in% usuarioList)
# BIO answers from 2012 onwards define the set of year-month columns.
dadosRespostasComunidade <- subset(dadosRespostas, dadosRespostas$comunidade == 'BIO' & dadosRespostas$anoCriacao >= 2012)
# One column per year-month (first 7 chars of the date, "YYYY-MM") plus
# three summary columns appended at the end.
colunas <- c(unique(sort(substr(dadosRespostasComunidade$dataCriacao, 1, 7))), "SOMA_VOTE_UP", "MEDIA" , "REPUTACAO")
numeroLinhas <- length(usuarioList)
numeroColunas <- length(colunas)
tabela <- matrix(nrow=numeroLinhas, ncol=numeroColunas, dimnames=list(usuarioList,colunas))
# initialise every cell of the table with zero
for(usuario in usuarioList) {
  for(coluna in colunas) {
    tabela[usuario, coluna] <- 0
  }
}
# Accumulate vote-ups per user and per year-month (2012 onwards only).
for(i in 1:nrow(top50DadosRespostas)) {
  row <- top50DadosRespostas[i,]
  usuario <- as.character(row$idUsuario)
  ano <- as.numeric(row$anoCriacao)
  anoMes <- as.character(substr(row$dataCriacao, 1, 7))
  votesUp <- row$votosPositivos
  if(ano >= 2012) {
    tabela[usuario, anoMes] <- tabela[usuario, anoMes] + votesUp
  }
}
# Per-user totals, monthly mean (the 3 summary columns are excluded from the
# month count, hence "length(colunas) - 3"), and reputation.
medias <- c()
for(usuario in usuarioList) {
  for(coluna in colunas) {
    if(coluna != "SOMA_VOTE_UP" & coluna != "REPUTACAO" & coluna != "MEDIA") {
      tabela[usuario, "SOMA_VOTE_UP"] <- tabela[usuario, coluna] + tabela[usuario, "SOMA_VOTE_UP"]
    }
  }
  tabela[usuario, "MEDIA"] <- (tabela[usuario, "SOMA_VOTE_UP"]/(length(colunas) - 3))
  tabela[usuario, "REPUTACAO"] <- subset(dadosComunidadeTop, dadosComunidadeTop$idUsuario == usuario)$reputacao
  medias <- c(medias, as.numeric(tabela[usuario, "MEDIA"]))
}
print(tabela)
print(paste("Analise de ", as.character(length(colunas) - 3), " meses"))
#hist(medias, breaks = 50, xlab = "Average Answers", ylab = "Frequency", main = "Average Answers")
boxplot(medias, ylab = "Average Votes Up")
print(summary(medias))
| /analysis/bio/voteUpRespostasQuartisBio.R | no_license | thiagoprocaci/experts-semantic-analysis | R | false | false | 2,654 | r | dadosParticipacaoGeral = read.csv("C:\\Users\\thiag_000\\Desktop\\analises2\\participacaoComunidades.csv", header = TRUE, sep = ';')
dadosRespostas = read.csv("C:\\Users\\thiag_000\\Desktop\\analises2\\respostasComunidades.csv", header = TRUE, sep = ';')
#dadosParticipacaoGeral = read.csv("/home/thiagoprocaci/analises/participacaoComunidades.csv", header = TRUE, sep = ';')
#dadosRespostas = read.csv("/home/thiagoprocaci/analises/respostasComunidades.csv", header = TRUE, sep = ';')
dadosBio = subset(dadosParticipacaoGeral, dadosParticipacaoGeral$comunidade == 'BIO')
resumo <- summary(dadosBio$reputacao)
primeiroQuartil <- resumo[2]
terceiroQuartil <- resumo[5]
dadosComunidadeTop <- subset(dadosParticipacaoGeral, dadosParticipacaoGeral$comunidade == 'BIO' & dadosParticipacaoGeral$reputacao >= primeiroQuartil & dadosParticipacaoGeral$reputacao <= terceiroQuartil)
usuarioList <- unique(dadosComunidadeTop$idUsuario)
top50DadosRespostas <- subset(dadosRespostas, dadosRespostas$idUsuario %in% usuarioList)
dadosRespostasComunidade <- subset(dadosRespostas, dadosRespostas$comunidade == 'BIO' & dadosRespostas$anoCriacao >= 2012)
colunas <- c(unique(sort(substr(dadosRespostasComunidade$dataCriacao, 1, 7))), "SOMA_VOTE_UP", "MEDIA" , "REPUTACAO")
numeroLinhas <- length(usuarioList)
numeroColunas <- length(colunas)
tabela <- matrix(nrow=numeroLinhas, ncol=numeroColunas, dimnames=list(usuarioList,colunas))
#inicializando a tabela
for(usuario in usuarioList) {
for(coluna in colunas) {
tabela[usuario, coluna] <- 0
}
}
for(i in 1:nrow(top50DadosRespostas)) {
row <- top50DadosRespostas[i,]
usuario <- as.character(row$idUsuario)
ano <- as.numeric(row$anoCriacao)
anoMes <- as.character(substr(row$dataCriacao, 1, 7))
votesUp <- row$votosPositivos
if(ano >= 2012) {
tabela[usuario, anoMes] <- tabela[usuario, anoMes] + votesUp
}
}
medias <- c()
for(usuario in usuarioList) {
for(coluna in colunas) {
if(coluna != "SOMA_VOTE_UP" & coluna != "REPUTACAO" & coluna != "MEDIA") {
tabela[usuario, "SOMA_VOTE_UP"] <- tabela[usuario, coluna] + tabela[usuario, "SOMA_VOTE_UP"]
}
}
tabela[usuario, "MEDIA"] <- (tabela[usuario, "SOMA_VOTE_UP"]/(length(colunas) - 3))
tabela[usuario, "REPUTACAO"] <- subset(dadosComunidadeTop, dadosComunidadeTop$idUsuario == usuario)$reputacao
medias <- c(medias, as.numeric(tabela[usuario, "MEDIA"]))
}
print(tabela)
print(paste("Analise de ", as.character(length(colunas) - 3), " meses"))
#hist(medias, breaks = 50, xlab = "Average Answers", ylab = "Frequency", main = "Average Answers")
boxplot(medias, ylab = "Average Votes Up")
print(summary(medias))
|
library(plyr)
# Read the four Kendall point layers, keeping name fields as character.
daycares <- read.csv("shps/kendall/day_cares.csv", stringsAsFactors = FALSE)
public <- read.csv("shps/kendall/public_schools.csv", stringsAsFactors = FALSE)
private <- read.csv("shps/kendall/private_schools.csv", stringsAsFactors = FALSE)
housing <- read.csv("shps/kendall/pub_housing.csv", stringsAsFactors = FALSE)
# Reduce each layer to name / city / coordinates.
datadc <- daycares[, c("NAME", "CITY", "LAT", "LON")]
dataps <- public[, c("NAME", "LCITY", "LAT", "LON")]
datapr <- private[, c("NAME", "LCITY", "LAT", "LON")]
dataph <- housing[, c("PROJECT_NA", "PLACE_NM2K", "Y", "X")]
# Harmonise the column names of the two layers that differ from the rest.
colnames(dataph) <- c("NAME", "LCITY", "LAT", "LON")
colnames(datadc) <- c("NAME", "LCITY", "LAT", "LON")
# Tag each record with its layer of origin.
datadc$TYPE <- "daycare"
dataps$TYPE <- "public school"
datapr$TYPE <- "private school"
dataph$TYPE <- "public housing"
# Stack all four layers and export the combined point set.
data_all <- rbind(datadc, dataps, datapr, dataph)
write.csv(data_all, "data_all.csv")
#OLD DATA -- earlier version of the combine step, kept for reference.
# NOTE(review): `schools` is never created in this file; this section
# presumably relied on an earlier read.csv() that was removed -- confirm.
datasc <- schools[,c("lat","lng")]
datahs <- housing[,c("LAT","LON")]
datahs$type <- "housing"
#lat lng for daycares
# Location looks like "... (lat, lng)": strip up to the opening parenthesis,
# drop the closing one, then split on ", " into two columns.
daycares$spot <- gsub("^.*?\\(","", daycares$Location)
daycares$spot <- gsub("\\)", "", daycares$spot)
list <- strsplit(daycares$spot, ", ")  # NOTE(review): shadows base::list
df <- ldply(list)
colnames(df) <- c("lat", "lng")
daycares <- cbind(daycares, df)
#lat lng for schools (same parenthesised-coordinate parsing as above)
schools$spot <- gsub("^.*?\\(","", schools$Location.1)
schools$spot <- gsub("\\)", "", schools$spot)
list2 <- strsplit(schools$spot, ", ")
df2 <- ldply(list2)
colnames(df2) <- c("lat", "lng")
schools <- cbind(schools, df2)
# Reduce each layer to coordinates plus a type tag, then stack all three.
datadc <- daycares[,c("lat","lng")]
datasc <- schools[,c("lat","lng")]
datadc$type <- "daycare"
datasc$type <- "school"
colnames(datahs) <- c("lat","lng", "type")
datastuff <- rbind(datadc, datasc)
datastuff <- rbind(datastuff, datahs)
# Let's calculate area square miles per town
#load rgdal package
library(rgdal)
## load your polygon shapefile
dsn<-"townsmap"
ogrListLayers(dsn)
#list the layers in the above directory
lol<-readOGR(dsn,"towns")
#shapefile name is lol
#check your data
summary(lol)
getClass("Polygon")
# Per-polygon planar areas pulled straight from the sp slot structure.
ur.area<-sapply(slot(lol, "polygons"), function(x) sapply(slot(x, "Polygons"), slot, "area"))
##check the areas
str(ur.area)
# NOTE(review): area.map() is presumably from the maps package, which is only
# loaded further below -- this call likely fails as written; confirm intent.
area.map(dsn)
require(rgeos)
# NOTE(review): rgeos::gArea() expects an sp geometry; ur.area is the numeric
# area vector extracted above, so this call likely errors -- confirm.
gArea(ur.area)
require(gtools)
require(ggplot2)
require(rgdal)
require(scales)
require(ggmap)
require(dplyr)
require(Cairo)
require(gpclib)
require(maptools)
require(reshape)
gpclibPermit()
gpclibPermitStatus()
towntracts <- readOGR(dsn="townsmap", layer="towns")
#towntracts <- fortify(towntracts, region="NAME10")
#gArea(towntracts)
# area.map
require(maps)
require(raster)
#area.map(towntracts, "NAME10")
# Overlay the school and daycare points on top of the town polygon map.
sch <- subset(schools, select=c("School.Name", "lat", "lng"))
colnames(sch) <- c("Name", "Lat", "Lng")
dc <- subset(daycares, select=c("Name", "lat", "lng"))
colnames(dc) <- c("Name", "Lat", "Lng")
# Combined point set; coordinates were parsed from text, so coerce to numeric.
sch_dc <- rbind(sch, dc)
sch_dc$Lat <- as.numeric(sch_dc$Lat)
sch_dc$Lng <- as.numeric(sch_dc$Lng)
plot(towntracts)
#coordinates(sch_dc) <- c("Lng", "Lat")
#projection(sch_dc) <- "+init=epsg:4326"
#plotted <- spTransform(sch_dc, CRS=CRS(projection(ct)))
# BUG FIX: the original called plot(sch_dc, ..., add=T) on a plain data frame
# (plot.data.frame ignores add=) and was followed by an orphan ", add=T" line,
# a parse error that stopped the whole script from sourcing. Overlay the
# points explicitly instead.
points(sch_dc$Lng, sch_dc$Lat, pch=20, col="steelblue")
#library(rgeos)
#library(dismo)
#ct <- gmap("Connecticut, US")
#circles <- gBuffer(sch_dc, width=457.2)
#str(circles)
#plot(circles)
| /2015/12/ct-drug-zones/unsorted/area_analysis.R | permissive | steve-kasica/data-1 | R | false | false | 3,317 | r | library(plyr)
daycares <- read.csv("shps/kendall/day_cares.csv", stringsAsFactors=FALSE)
public <- read.csv("shps/kendall/public_schools.csv", stringsAsFactors=FALSE)
private <- read.csv("shps/kendall/private_schools.csv", stringsAsFactors=FALSE)
housing <- read.csv("shps/kendall/pub_housing.csv", stringsAsFactors=FALSE)
datadc <- daycares[,c("NAME", "CITY", "LAT", "LON")]
dataps <- public[,c("NAME", "LCITY", "LAT", "LON")]
datapr <- private[,c("NAME", "LCITY", "LAT", "LON")]
dataph <- housing[,c("PROJECT_NA", "PLACE_NM2K", "Y", "X")]
colnames(dataph) <- c("NAME", "LCITY", "LAT", "LON")
colnames(datadc) <- c("NAME", "LCITY", "LAT", "LON")
datadc$TYPE <- "daycare"
dataps$TYPE <- "public school"
datapr$TYPE <- "private school"
dataph$TYPE <- "public housing"
data_all <- rbind(datadc, dataps)
data_all <- rbind(data_all, datapr)
data_all <- rbind(data_all, dataph)
write.csv(data_all, "data_all.csv")
#OLD DATA
datasc <- schools[,c("lat","lng")]
datahs <- housing[,c("LAT","LON")]
datahs$type <- "housing"
#lat lng for daycares
daycares$spot <- gsub("^.*?\\(","", daycares$Location)
daycares$spot <- gsub("\\)", "", daycares$spot)
list <- strsplit(daycares$spot, ", ")
df <- ldply(list)
colnames(df) <- c("lat", "lng")
daycares <- cbind(daycares, df)
#lat lng for schools
schools$spot <- gsub("^.*?\\(","", schools$Location.1)
schools$spot <- gsub("\\)", "", schools$spot)
list2 <- strsplit(schools$spot, ", ")
df2 <- ldply(list2)
colnames(df2) <- c("lat", "lng")
schools <- cbind(schools, df2)
datadc <- daycares[,c("lat","lng")]
datasc <- schools[,c("lat","lng")]
datadc$type <- "daycare"
datasc$type <- "school"
colnames(datahs) <- c("lat","lng", "type")
datastuff <- rbind(datadc, datasc)
datastuff <- rbind(datastuff, datahs)
# Let's calculate area square miles per town
#load rgdal package
library(rgdal)
## load your polygone shapefile
dsn<-"townsmap"
ogrListLayers(dsn)
#list the layers in the above directory
lol<-readOGR(dsn,"towns")
#shapefile name is lol
#check your data
summary(lol)
getClass("Polygon")
ur.area<-sapply(slot(lol, "polygons"), function(x) sapply(slot(x, "Polygons"), slot, "area"))
##check the areas
str(ur.area)
area.map(dsn)
require(rgeos)
gArea(ur.area)
require(gtools)
require(ggplot2)
require(rgdal)
require(scales)
require(ggmap)
require(dplyr)
require(Cairo)
require(gpclib)
require(maptools)
require(reshape)
gpclibPermit()
gpclibPermitStatus()
towntracts <- readOGR(dsn="townsmap", layer="towns")
#towntracts <- fortify(towntracts, region="NAME10")
#gArea(towntracts)
# area.map
require(maps)
require(raster)
#area.map(towntracts, "NAME10")
# Overlay the school and daycare points on top of the town polygon map.
sch <- subset(schools, select=c("School.Name", "lat", "lng"))
colnames(sch) <- c("Name", "Lat", "Lng")
dc <- subset(daycares, select=c("Name", "lat", "lng"))
colnames(dc) <- c("Name", "Lat", "Lng")
# Combined point set; coordinates were parsed from text, so coerce to numeric.
sch_dc <- rbind(sch, dc)
sch_dc$Lat <- as.numeric(sch_dc$Lat)
sch_dc$Lng <- as.numeric(sch_dc$Lng)
plot(towntracts)
#coordinates(sch_dc) <- c("Lng", "Lat")
#projection(sch_dc) <- "+init=epsg:4326"
#plotted <- spTransform(sch_dc, CRS=CRS(projection(ct)))
# BUG FIX: the original called plot(sch_dc, ..., add=T) on a plain data frame
# (plot.data.frame ignores add=) and was followed by an orphan ", add=T" line,
# a parse error that stopped the whole script from sourcing. Overlay the
# points explicitly instead.
points(sch_dc$Lng, sch_dc$Lat, pch=20, col="steelblue")
#library(rgeos)
#library(dismo)
#ct <- gmap("Connecticut, US")
#circles <- gBuffer(sch_dc, width=457.2)
#str(circles)
#plot(circles)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wroboclient.R
\name{absPosition.robo}
\alias{absPosition.robo}
\title{Posição no sistema de referência absoluto}
\usage{
\method{absPosition}{robo}(dev)
}
\arguments{
\item{dev}{Um objeto de classe \code{robo} contendo informações sobre servidor XML-RPC}
}
\value{
Lista contendo as coordenadas x, y e z.
}
\description{
Retorna a posição do robô cartesiano no sistema de referência absoluto.
O sistema de referência absoluto é o que está na placa controladora do Robo
}
\examples{
absPosition(dev)
}
| /man/absPosition.robo.Rd | permissive | tunelipt/rwrobo | R | false | true | 592 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wroboclient.R
\name{absPosition.robo}
\alias{absPosition.robo}
\title{Posição no sistema de referência absoluto}
\usage{
\method{absPosition}{robo}(dev)
}
\arguments{
\item{dev}{Um objeto de classe \code{robo} contendo informações sobre servidor XML-RPC}
}
\value{
Lista contendo as coordenadas x, y e z.
}
\description{
Retorna a posição do robô cartesiano no sistema de referência absoluto.
O sistema de referência absoluto é o que está na placa controladora do Robo
}
\examples{
absPosition(dev)
}
|
# Fuzzy c-means clustering of an expression table (e1071::cmeans).
# Usage: Rscript cmeans.R <exprs_file> <n_clusters> <work_dir> <output_file>
args <- commandArgs(TRUE)
exprs_file <- args[1] # tab-separated table; first column holds row names
n <- as.numeric(args[2]) # number of clusters
path <- args[3] # working directory containing the input file
path_output <- args[4] # destination file for the cluster assignments
library('e1071')
setwd(path) # NOTE(review): changes global state for the rest of the session
# BUG FIX: the original applied as.matrix() to the file *path* before passing
# it to read.table(); convert the data that was read, not the file name.
exprs <- as.matrix(read.table(exprs_file, header = TRUE, row.names = 1, sep = "\t"))
c1 <- cmeans(exprs, n, iter.max = 100)
# One output row per input row with its hard cluster assignment.
# (Renamed from `list`, which shadowed base::list; the unused as.vector()
# copy in `vector` was dead code and has been removed.)
cluster_assignments <- as.list(c1$cluster)
write.table(as.matrix(cluster_assignments), path_output, sep = "\t")
write.table(as.matrix(list), path_output, sep="\t") | /Clustering/cmeans.R | no_license | anandksrao/Gene_coexpression_scripts | R | false | false | 394 | r | args = commandArgs(TRUE)
exprs = args[1]
n = args[2]
path = args[3]
path_output = args[4]
library('e1071')
setwd(path)
exprs = as.matrix(exprs)
exprs = read.table(exprs, header=T,row.names=1,sep="\t")
n = as.numeric(n)
c1 <- cmeans(exprs, n, iter.max = 100)
list <- as.list(c1$cluster)
vector <- as.vector(c1$cluster)
write.table(as.matrix(list), path_output, sep="\t") |
# Fit the single-subject joint LBA+RL Stan model (v11, "priorsamplejoint2")
# run by run, saving each fit to an .RData "run package" on disk and skipping
# any run whose package file already exists.
#So: cauchy at 0, 1 seems to not converge on a straightforward result with iter=300,warmup=200, or even warmup=1500, iter=2000. This may be a problem.
library(rstan)
source("stanlba/lba_rl_joint_setup.R")
require(R.utils)
options(mc.cores = 6)
source("stanlba/singlelevelmodel/lba_rl_joint_v1_functions.R")
source("stanlba/singlelevelmodel/lba_rl_joint_v7_functions.R")
source("stanlba/singlelevelmodel/lba_rl_joint_v10_functions.R")
#we have problems running all subjects in a single run.
#so let's have this save as we go, and then reload and avoid re-saving if there's already a saved file.
lba_rl_version<-"joint_20180713_1"
model.subversion<-"j"
single_run_dir<-paste0(localsettings$data.dir,"lba_rl")
output_dir<-paste0(single_run_dir,"/",lba_rl_version, "/")
dir.create(single_run_dir, showWarnings = FALSE)
dir.create(output_dir, showWarnings = FALSE)
#file_folder<-"/Users/benjaminsmith/Dropbox/joint-modeling/reversal-learning/behavioral-analysis/data/lba_rl_single_estimates.RData"
#load(file=file_folder)
# Rhat acceptance thresholds (core parameters vs. everything else); currently
# unused because the Rhat gate below is short-circuited with if(TRUE).
Rhat_corevals_limit=1.05 # I don't care about this at the moment. I just want to take a look at what we're getting!
Rhat_general_limit=1.1
results.list<-list()
model.name<-"lba_rl_single_exp_joint_v11_priorsamplejoint2"
cat("Compiling model...")
lba_rl_single_joint<-stan_model(paste0('stanlba/stanfiles/incremental/',model.name,'.stan'))
cat("compiled.\n")
regions<-get_dmn_regions()[1:5]
#100,140,218,261,334
#ll=100;ul=139
#ll=140;ul=217
#ll=218;ul=260
#ll=261;ul=334
#ll=335;ul=400
#ll=100;ul=400
ll=106;ul=106
# Loop over subject x run x motivation; currently restricted to subject 106
# and the first run/motivation only (the commented ranges above select
# subject batches). `rawdata` comes from the sourced setup script --
# presumably a data.table given the [subid==sid, runid] indexing; confirm.
for (sid in c(106)){#unique(rawdata$subid)[unique(rawdata$subid)>=ll & unique(rawdata$subid)<=ul]){
  for (r in unique(rawdata[subid==sid,runid])[1]){#r<-1
    motivations<-unique(rawdata[subid==sid & runid==r,Motivation])
    for(m in motivations[1]){#m<-"punishment"
      package_filepath<-paste0(output_dir,"run_package_",sid,"_",r,"_",m,"_",model.name,model.subversion,".RData")
      srm.data<-select_rawdata_cols_for_run(rawdata,sid,r,m)
      # Only fit if no saved package exists for this subject/run/motivation.
      if(!file.exists(package_filepath)){
        # Deterministic per-run seed derived from a fixed base seed.
        baseseed<-53171370#129221139#sample.int(.Machine$integer.max, 1)
        roundseed<-sid+r+which(motivations==m)+baseseed
        model_attempts=0
        warmup=400
        iterations=500
        #warmup=10;iterations=20
        try_this_model=TRUE
        # Retry loop: refit with more iterations/new seed until accepted,
        # capped at 3 attempts (see model_attempts check at the bottom).
        while(try_this_model==TRUE){
          model_attempts<-model_attempts+1
          start_time<-Sys.time()
          n_chains=6
          # The tryCatch/withTimeout wrapper below (120-minute wall clock)
          # has been commented out; sampling now runs unguarded.
          # srm.fit <- tryCatch(
          #   expr = {
          #     withTimeout({
          srm.fit <-
            sampling(lba_rl_single_joint,
                     data = list(LENGTH=dim(srm.data)[1],
                                 NUM_CHOICES=2,
                                 prior_cauchygamma=1,
                                 prior_cholesky=2.5,
                                 DELTA_N=2,
                                 THETA_N=length(regions)
                     ),
                     warmup = warmup,
                     iter = iterations,
                     init= get_starting_values(n_chains,thetaDelta_count=length(regions)+2),
                     chains = n_chains,
                     seed = roundseed,
                     control = list(max_treedepth = 13,adapt_delta=0.85))
          #     },timeout = 60*120,onTimeout = "error")
          #   },
          #   TimeoutException = function(ex){cat(paste0("could not run calculation for sid",sid," rid", r, " m", m, " within 120 minutes. skipping!"))
          #     cat(ex)
          #     cat("\n")
          #     return(NULL)
          #   }
          # )
          end_time<-Sys.time()
          if(!is.null(srm.fit)){
            #only save it if Rhat is within the accepted range.
            # TRUE when all non-NaN Rhats pass the general limit AND the
            # first three (core) Rhats pass the stricter core limit.
            test_rhat_vals<-function(rhat_vector){
              #ignore "nan" values as long as they're not all NaN
              if (all(is.nan(rhat_vector)))return(FALSE)
              if (all(rhat_vector[!is.nan(rhat_vector)]<=Rhat_general_limit)){
                if(all(rhat_vector[1:3]<=Rhat_corevals_limit)){
                  return(TRUE)
                }
              }
              return(FALSE)
            }
            #ignore this test!
            # NOTE(review): the Rhat gate is bypassed with if(TRUE), so every
            # non-NULL fit is saved regardless of convergence.
            if(TRUE){#test_rhat_vals(summary(srm.fit)$summary[,"Rhat"])){
              try_this_model<-FALSE#we've found...no need to try again.
              run_package<-list("sid"=sid,"rid"=r,"motivation"=m,fit=srm.fit,
                                duration=as.numeric(end_time-start_time),"iterations"=iterations,
                                "baseseed"=baseseed,"roundseed"=roundseed)
              save(run_package,file=package_filepath)
              results.list<-c(results.list,list(run_package))
            }else if (iterations<5000){
              print(paste0("One or more Rhat values for sid",sid," rid", r, " m", m, " were outside the accepted range. Trying again with more iterations and a different seed."))
              baseseed=baseseed+1
              iterations=iterations+warmup*2
              warmup=warmup+warmup*2
            }else{
              print(paste0("One or more Rhat values for sid",sid," rid", r, " m", m, " were outside the accepted range even for up to 5000 iterations. Giving up."))
              try_this_model<-FALSE
            }
          }else{
            print("model couldn't be calculated within the selected time limit. The policy for this run is not to repeated timed-out models.")
            try_this_model<-FALSE
          }
          # Hard cap: never fit the same run more than three times.
          if(model_attempts>=3){
            try_this_model<-FALSE
          }
        }
      }else{
        print(paste0("loading from file sid ",sid, "; r ",r, "; m ", m))
        load(package_filepath)
        results.list<-c(results.list,list(run_package))
      }
    }
  }
}
# Leaves the last fit in the workspace for interactive inspection.
srm.fit
| /stanlba/singlelevelmodel/lba_rl_joint_v11j.R | permissive | bjsmith/reversallearning | R | false | false | 5,805 | r | #So: cauchy at 0, 1 seems to not converge on a a straightforward result with iter=300,warmup=200, or even warmup=1500, iter=2000. This may be a problem.
library(rstan)
source("stanlba/lba_rl_joint_setup.R")
require(R.utils)
options(mc.cores = 6)
source("stanlba/singlelevelmodel/lba_rl_joint_v1_functions.R")
source("stanlba/singlelevelmodel/lba_rl_joint_v7_functions.R")
source("stanlba/singlelevelmodel/lba_rl_joint_v10_functions.R")
#we have problems running all subjects in a single run.
#so let's have this save as we go, and then reload and avoid re-saving if there's already a saved file.
lba_rl_version<-"joint_20180713_1"
model.subversion<-"j"
single_run_dir<-paste0(localsettings$data.dir,"lba_rl")
output_dir<-paste0(single_run_dir,"/",lba_rl_version, "/")
dir.create(single_run_dir, showWarnings = FALSE)
dir.create(output_dir, showWarnings = FALSE)
#file_folder<-"/Users/benjaminsmith/Dropbox/joint-modeling/reversal-learning/behavioral-analysis/data/lba_rl_single_estimates.RData"
#load(file=file_folder)
Rhat_corevals_limit=1.05 # I don't care about this at the moment. I just want to take a look at what we'regetting!
Rhat_general_limit=1.1
results.list<-list()
model.name<-"lba_rl_single_exp_joint_v11_priorsamplejoint2"
cat("Compiling model...")
lba_rl_single_joint<-stan_model(paste0('stanlba/stanfiles/incremental/',model.name,'.stan'))
cat("compiled.\n")
regions<-get_dmn_regions()[1:5]
#100,140,218,261,334
#ll=100;ul=139
#ll=140;ul=217
#ll=218;ul=260
#ll=261;ul=334
#ll=335;ul=400
#ll=100;ul=400
ll=106;ul=106
for (sid in c(106)){#unique(rawdata$subid)[unique(rawdata$subid)>=ll & unique(rawdata$subid)<=ul]){
for (r in unique(rawdata[subid==sid,runid])[1]){#r<-1
motivations<-unique(rawdata[subid==sid & runid==r,Motivation])
for(m in motivations[1]){#m<-"punishment"
package_filepath<-paste0(output_dir,"run_package_",sid,"_",r,"_",m,"_",model.name,model.subversion,".RData")
srm.data<-select_rawdata_cols_for_run(rawdata,sid,r,m)
if(!file.exists(package_filepath)){
baseseed<-53171370#129221139#sample.int(.Machine$integer.max, 1)
roundseed<-sid+r+which(motivations==m)+baseseed
model_attempts=0
warmup=400
iterations=500
#warmup=10;iterations=20
try_this_model=TRUE
while(try_this_model==TRUE){
model_attempts<-model_attempts+1
start_time<-Sys.time()
n_chains=6
# srm.fit <- tryCatch(
# expr = {
# withTimeout({
srm.fit <-
sampling(lba_rl_single_joint,
data = list(LENGTH=dim(srm.data)[1],
NUM_CHOICES=2,
prior_cauchygamma=1,
prior_cholesky=2.5,
DELTA_N=2,
THETA_N=length(regions)
),
warmup = warmup,
iter = iterations,
init= get_starting_values(n_chains,thetaDelta_count=length(regions)+2),
chains = n_chains,
seed = roundseed,
control = list(max_treedepth = 13,adapt_delta=0.85))
# },timeout = 60*120,onTimeout = "error")
# },
# TimeoutException = function(ex){cat(paste0("could not run calculation for sid",sid," rid", r, " m", m, " within 120 minutes. skipping!"))
# cat(ex)
# cat("\n")
# return(NULL)
# }
# )
end_time<-Sys.time()
if(!is.null(srm.fit)){
#only save it if Rhat is within the accepted range.
test_rhat_vals<-function(rhat_vector){
#ignore "nan" values as long as they're not all NaN
if (all(is.nan(rhat_vector)))return(FALSE)
if (all(rhat_vector[!is.nan(rhat_vector)]<=Rhat_general_limit)){
if(all(rhat_vector[1:3]<=Rhat_corevals_limit)){
return(TRUE)
}
}
return(FALSE)
}
#ignore this test!
if(TRUE){#test_rhat_vals(summary(srm.fit)$summary[,"Rhat"])){
try_this_model<-FALSE#we've found...no need to try again.
run_package<-list("sid"=sid,"rid"=r,"motivation"=m,fit=srm.fit,
duration=as.numeric(end_time-start_time),"iterations"=iterations,
"baseseed"=baseseed,"roundseed"=roundseed)
save(run_package,file=package_filepath)
results.list<-c(results.list,list(run_package))
}else if (iterations<5000){
print(paste0("One or more Rhat values for sid",sid," rid", r, " m", m, " were outside the accepted range. Trying again with more iterations and a different seed."))
baseseed=baseseed+1
iterations=iterations+warmup*2
warmup=warmup+warmup*2
}else{
print(paste0("One or more Rhat values for sid",sid," rid", r, " m", m, " were outside the accepted range even for up to 5000 iterations. Giving up."))
try_this_model<-FALSE
}
}else{
print("model couldn't be calculated within the selected time limit. The policy for this run is not to repeated timed-out models.")
try_this_model<-FALSE
}
if(model_attempts>=3){
try_this_model<-FALSE
}
}
}else{
print(paste0("loading from file sid ",sid, "; r ",r, "; m ", m))
load(package_filepath)
results.list<-c(results.list,list(run_package))
}
}
}
}
srm.fit
|
#' plot model comparisons
#'
#' Creates a user-chosen set of plots comparing model output from a summary of
#' multiple models, where the collection was created using the
#' `SSsummarize` function.
#'
#'
#' @param summaryoutput List created by `SSsummarize`
#' @param subplots Vector of subplots to be created
#' Numbering of subplots is as follows:
#' \itemize{
#' \item 1 spawning biomass
#' \item 2 spawning biomass with uncertainty intervals
#' \item 3 biomass ratio (hopefully equal to fraction of unfished)
#' \item 4 biomass ratio with uncertainty
#' \item 5 SPR ratio
#' \item 6 SPR ratio with uncertainty
#' \item 7 F value
#' \item 8 F value with uncertainty
#' \item 9 recruits
#' \item 10 recruits with uncertainty
#' \item 11 recruit devs
#' \item 12 recruit devs with uncertainty
#' \item 13 index fits
#' \item 14 index fits on a log scale
#' \item 15 phase plot
#' \item 16 densities
#' \item 17 cumulative densities
#' }
#' @template plot
#' @template print
#' @param png Has same result as `print`, included for consistency with
#' `SS_plots`.
#' @param pdf Write output to PDF file? Can't be used in conjunction with
#' `png` or `print`.
#' @param models Optional subset of the models described in
#' `summaryoutput`. Either "all" or a vector of numbers indicating
#' columns in summary tables.
#' @param endyrvec Optional single year or vector of years representing the
#' final year of values to show for each model. By default it is set to the
#' ending year specified in each model.
#' @param indexfleets Fleet numbers for each model to compare
#' indices of abundance. Can take different forms:
#' \itemize{
#' \item NULL: (default) create a separate plot for each index as long as the fleet
#' numbering is the same across all models.
#' \item integer: create a single comparison plot for the chosen index
#' \item vector of length equal to number of models: a single fleet number
#' for each model to be compared in a single plot
#' \item list: list of fleet numbers associated with indices within each
#' model to be compared, where the list elements are each a vector of the
#' same length but the names of the list elements don't matter and can be
#' absent.
#' }
#' @param indexUncertainty Show uncertainty intervals on index data?
#' Default=FALSE because if models have any extra standard deviations added,
#' these intervals may differ across models.
#' @param indexQlabel Add catchability to legend in plot of index fits
#' (TRUE/FALSE)?
#' @param indexQdigits Number of significant digits for catchability in legend
#' (if `indexQlabel = TRUE`)
#' @param indexSEvec Optional replacement for the SE values in
#' `summaryoutput[["indices"]]` to deal with the issue of differing uncertainty by
#' models described above.
#' @param indexPlotEach TRUE plots the observed index for each model with
#' colors, or FALSE just plots observed once in black dots.
#' @template labels
#' @param col Optional vector of colors to be used for lines. Input NULL
#' makes use of `rich.colors.short` function.
#' @param shadecol Optional vector of colors to be used for shading uncertainty
#' intervals. The default (NULL) is to use the same colors provided by
#' `col` (either the default or a user-chosen input) and make them
#' more transparent by applying the `shadealpha` input as an alpha
#' transparency value (using the `adjustcolor()` function)
#' @param pch Optional vector of plot character values
#' @param lty Optional vector of line types
#' @param lwd Optional vector of line widths
#' @param spacepoints Number of years between points shown on top of lines (for
#' long timeseries, points every year get mashed together)
#' @param staggerpoints Number of years to stagger the first point (if
#' `spacepoints > 1`) for each line (so that adjacent lines have points in
#' different years)
#' @param initpoint Year value for first point to be added to lines.
#' Points added to plots are those that satisfy
#' (Yr-initpoint)%%spacepoints == (staggerpoints*iline)%%spacepoints
#' @param tickEndYr TRUE/FALSE switch to turn on/off extra axis mark at final
#' year in timeseries plots.
#' @param shadeForecast TRUE/FALSE switch to turn on off shading of years beyond
#' the maximum ending year of the models
#' @param xlim Optional x limits
#' @param ylimAdj Multiplier for ylim parameter. Allows additional white space
#' to fit legend if necessary. Default=1.05.
#' @param xaxs Choice of xaxs parameter (see ?par for more info)
#' @param yaxs Choice of yaxs parameter (see ?par for more info)
#' @param type Type parameter passed to points (default 'o' overplots points on
#' top of lines)
#' @param uncertainty Show plots with uncertainty intervals? Either a single
#' TRUE/FALSE value, or a vector of TRUE/FALSE values for each model,
#' or a set of integers corresponding to the choice of models.
#' @param shadealpha Transparency adjustment used to make default shadecol
#' values (implemented as `adjustcolor(col=col, alpha.f=shadealpha)`)
#' @template legend
#' @param legendlabels Optional vector of labels to include in legend. Default
#' is 'model1','model2',etc.
#' @template legendloc
#' @param legendorder Optional vector of model numbers that can be used to have
#' the legend display the model names in an order that is different than that
#' which is represented in the summary input object.
#' @param legendncol Number of columns for the legend.
#' @param btarg Target biomass value at which to show a line (set to 0 to
#' remove)
#' @param minbthresh Minimum biomass threshold at which to show a line (set to
#' 0 to remove)
#' @param sprtarg Target value for SPR-ratio where line is drawn in the SPR
#' plots and phase plot.
#' @template pwidth
#' @template pheight
#' @template punits
#' @template res
#' @template ptsize
#' @template plotdir
#' @param filenameprefix Additional text to append to PNG or PDF file names.
#' It will be separated from default name by an underscore.
#' @param densitynames Vector of names (or subset of names) of parameters or
#' derived quantities contained in `summaryoutput[["pars"]][["Label"]]` or
#' `summaryoutput[["quants"]][["Label"]]` for which to make density plots
#' @param densityxlabs Optional vector of x-axis labels to use in the density
#' plots (must be equal in length to the printed vector of quantities that
#' match the `densitynames` input)
#' @param rescale TRUE/FALSE control of automatic rescaling of units into
#' thousands, millions, or billions
#' @param densityscalex Scalar for upper x-limit in density plots (values below
#' 1 will cut off the right tail to provide better contrast among narrower
#' distributions)
#' @param densityscaley Scalar for upper y-limit in density plots (values below
#' 1 will cut off top of highest peaks to provide better contrast among broader
#' distributions)
#' @param densityadjust Multiplier on bandwidth of kernel in density function
#' used for smoothing MCMC posteriors. See 'adjust' in ?density for details.
#' @param densitysymbols Add symbols along lines in density plots. Quantiles
#' are `c(0.025,0.1,0.25,0.5,0.75,0.9,0.975)`.
#' @param densitytails Shade tails outside of 95% interval darker in
#' density plots?
#' @param densitymiddle Shade middle inside of 95% interval darker in
#' density plots?
#' @param densitylwd Line width for density plots
#' @param fix0 Always include 0 in the density plots?
#' @param new Create new empty plot window
#' @param add Allows single plot to be added to existing figure. This needs to
#' be combined with specific 'subplots' input to make sure only one thing gets
#' added.
#' @param par list of graphics parameter values passed to the `par`
#' function
#' @template verbose
#' @param mcmcVec Vector of TRUE/FALSE values (or single value) indicating
#' whether input values are from MCMC or to use normal distribution around
#' MLE
#' @param show_equilibrium Whether to show the equilibrium values for
#' SSB. For some model comparisons, these might not be comparable and thus
#' useful to turn off. Defaults to TRUE.
#' @author Ian G. Taylor, John R. Wallace
#' @export
#' @seealso [SS_plots()], [SSsummarize()],
#' [SS_output()], [SSgetoutput()]
#' @examples
#' \dontrun{
#' # directories where models were run need to be defined
#' dir1 <- "c:/SS/mod1"
#' dir2 <- "c:/SS/mod2"
#'
#' # read two models
#' mod1 <- SS_output(dir = dir1)
#' mod2 <- SS_output(dir = dir2)
#'
#' # create list summarizing model results
#' mod.sum <- SSsummarize(list(mod1, mod2))
#'
#' # plot comparisons
#' SSplotComparisons(mod.sum, legendlabels = c("First model", "Second model"))
#'
#' # Example showing comparison of MLE to MCMC results where the mcmc would have
#' # been run in the subdirectory 'c:/SS/mod1/mcmc'
#' mod1 <- SS_output(dir = "c:/SS/mod1", dir.mcmc = "mcmc")
#' # pass the same model twice to SSsummarize in order to plot it twice
#' mod.sum <- SSsummarize(list(mod1, mod1))
#' # compare MLE to MCMC
#' SSplotComparisons(mod.sum,
#' legendlabels = c("MCMC", "MLE"),
#' mcmcVec = c(TRUE, FALSE)
#' )
#' }
#'
SSplotComparisons <-
function(summaryoutput, subplots = 1:20,
plot = TRUE, print = FALSE, png = print, pdf = FALSE,
models = "all",
endyrvec = NULL,
indexfleets = NULL,
indexUncertainty = TRUE,
indexQlabel = TRUE,
indexQdigits = 4,
indexSEvec = NULL,
# TRUE in following command plots the observed index for each model
# with colors, or FALSE just plots observed once in black dots
indexPlotEach = FALSE,
labels = c(
"Year", # 1
"Spawning biomass (t)", # 2
"Fraction of unfished", # 3
"Age-0 recruits (1,000s)", # 4
"Recruitment deviations", # 5
"Index", # 6
"Log index", # 7
"SPR-related quantity", # 8 automatically updated when consistent
"Density", # 9
"Management target", # 10
"Minimum stock size threshold", # 11
"Spawning output", # 12
"Harvest rate" # 13
),
col = NULL, shadecol = NULL,
pch = NULL, lty = 1, lwd = 2,
spacepoints = 10,
staggerpoints = 1,
initpoint = 0,
tickEndYr = TRUE,
shadeForecast = TRUE,
xlim = NULL, ylimAdj = 1.05,
xaxs = "i", yaxs = "i",
type = "o", uncertainty = TRUE, shadealpha = 0.1,
legend = TRUE, legendlabels = NULL, legendloc = "topright",
legendorder = NULL, legendncol = 1,
sprtarg = NULL, btarg = NULL, minbthresh = NULL,
pwidth = 6.5, pheight = 5.0, punits = "in", res = 300,
ptsize = 10,
plotdir = NULL,
filenameprefix = "",
densitynames = c("SSB_Virgin", "R0"),
densityxlabs = NULL,
rescale = TRUE,
densityscalex = 1,
densityscaley = 1,
densityadjust = 1,
densitysymbols = TRUE,
densitytails = TRUE,
densitymiddle = FALSE,
densitylwd = 1,
fix0 = TRUE,
new = TRUE,
add = FALSE,
par = list(mar = c(5, 4, 1, 1) + .1),
verbose = TRUE,
mcmcVec = FALSE,
show_equilibrium = TRUE) {
# switch to avoid repetition of warning about mean recruitment
meanRecWarning <- TRUE
ymax_vec <- rep(NA, 17) # vector of ymax values for each plot
# local version of save_png which doesn't relate to plotinfo and
# also adds control over 'filenameprefix' and 'par',
# (where the code is not following good practices and
# those arguments are not formally passed to the function)
save_png_comparisons <- function(file) {
# if extra text requested, add it before extention in file name
file <- paste0(filenameprefix, file)
# open png file
png(
filename = file.path(plotdir, file),
width = pwidth,
height = pheight,
units = punits,
res = res,
pointsize = ptsize
)
# change graphics parameters to input value
par(par)
}
if (png) {
print <- TRUE
}
if (png & is.null(plotdir)) {
stop("To print PNG files, you must supply a directory as 'plotdir'")
}
# check for internal consistency
if (pdf & png) {
stop("To use 'pdf', set 'print' or 'png' to FALSE.")
}
if (pdf) {
if (is.null(plotdir)) {
stop("To write to a PDF, you must supply a directory as 'plotdir'")
}
pdffile <- file.path(
plotdir,
paste0(
filenameprefix, "SSplotComparisons_",
format(Sys.time(), "%d-%b-%Y_%H.%M"), ".pdf"
)
)
pdf(file = pdffile, width = pwidth, height = pheight)
if (verbose) {
message("PDF file with plots will be:", pdffile)
}
par(par)
}
# subfunction to add legend
# legendfun <- function(legendlabels, cumulative = FALSE) {
# if (cumulative) {
# legendloc <- "topleft"
# }
# if (is.numeric(legendloc)) {
# Usr <- par()$usr
# legendloc <- list(
# x = Usr[1] + legendloc[1] * (Usr[2] - Usr[1]),
# y = Usr[3] + legendloc[2] * (Usr[4] - Usr[3])
# )
# }
#
# # if type input is "l" then turn off points on top of lines in legend
# legend.pch <- pch
# if (type == "l") {
# legend.pch <- rep(NA, length(pch))
# }
# legend(legendloc,
# legend = legendlabels[legendorder],
# col = col[legendorder],
# lty = lty[legendorder],
# seg.len = 2,
# lwd = lwd[legendorder],
# pch = legend.pch[legendorder],
# bty = "n",
# ncol = legendncol
# )
# }
# get stuff from summary output
n <- summaryoutput[["n"]]
nsexes <- summaryoutput[["nsexes"]]
startyrs <- summaryoutput[["startyrs"]]
endyrs <- summaryoutput[["endyrs"]]
pars <- summaryoutput[["pars"]]
parsSD <- summaryoutput[["parsSD"]]
parphases <- summaryoutput[["parphases"]]
quants <- summaryoutput[["quants"]]
quantsSD <- summaryoutput[["quantsSD"]]
SpawnBio <- summaryoutput[["SpawnBio"]]
SpawnBioLower <- summaryoutput[["SpawnBioLower"]]
SpawnBioUpper <- summaryoutput[["SpawnBioUpper"]]
Bratio <- summaryoutput[["Bratio"]]
BratioLower <- summaryoutput[["BratioLower"]]
BratioUpper <- summaryoutput[["BratioUpper"]]
SPRratio <- summaryoutput[["SPRratio"]]
SPRratioLower <- summaryoutput[["SPRratioLower"]]
SPRratioUpper <- summaryoutput[["SPRratioUpper"]]
Fvalue <- summaryoutput[["Fvalue"]]
FvalueLower <- summaryoutput[["FvalueLower"]]
FvalueUpper <- summaryoutput[["FvalueUpper"]]
recruits <- summaryoutput[["recruits"]]
recruitsLower <- summaryoutput[["recruitsLower"]]
recruitsUpper <- summaryoutput[["recruitsUpper"]]
recdevs <- summaryoutput[["recdevs"]]
recdevsLower <- summaryoutput[["recdevsLower"]]
recdevsUpper <- summaryoutput[["recdevsUpper"]]
indices <- summaryoutput[["indices"]]
# note that "mcmc" is a a list of dataframes,
# 1 for each model with mcmc output
mcmc <- summaryoutput[["mcmc"]]
lowerCI <- summaryoutput[["lowerCI"]]
upperCI <- summaryoutput[["upperCI"]]
SpawnOutputUnits <- summaryoutput[["SpawnOutputUnits"]]
btargs <- summaryoutput[["btargs"]]
minbthreshs <- summaryoutput[["minbthreshs"]]
sprtargs <- summaryoutput[["sprtargs"]]
SPRratioLabels <- summaryoutput[["SPRratioLabels"]]
FvalueLabels <- summaryoutput[["FvalueLabels"]]
# checking for the same reference points across models
if (is.null(btarg)) {
btarg <- unique(btargs)
if (length(btarg) > 1) {
warning("setting btarg = -999 because models don't have matching values")
btarg <- -999
}
}
if (is.null(minbthresh)) {
minbthresh <- unique(minbthreshs)
if (length(minbthresh) > 1) {
warning("setting minbthresh = -999 because models don't have matching values")
minbthresh <- -999
}
}
if (is.null(sprtarg)) {
sprtarg <- unique(sprtargs)
if (length(sprtarg) > 1) {
warning("setting sprtarg = -999 because models don't have matching values")
sprtarg <- -999
}
}
SPRratioLabel <- unique(SPRratioLabels)
if (length(SPRratioLabel) > 1) {
warning(
"setting label for SPR plot to 8th element of input 'labels' ",
"because the models don't have matching labels"
)
SPRratioLabel <- labels[8]
}
FvalueLabel <- unique(FvalueLabels)
if (length(FvalueLabel) > 1) {
warning(
"setting label for F plot to 13th element of input 'labels' ",
"because the models don't have matching labels"
)
FvalueLabel <- labels[13]
} else {
FvalueLabel <- gsub("_", " ", FvalueLabel)
}
### process input for which models have uncertainty shown
##
# if vector is numeric rather than logical, convert to logical
if (!is.logical(uncertainty) & is.numeric(uncertainty)) {
if (any(!uncertainty %in% 1:n)) {
# stop if numerical values aren't integers <= n
stop(
"'uncertainty' should be a subset of the integers\n",
" 1-", n, ", where n=", n, " is the number of models.\n",
" Or it can be a single TRUE/FALSE value.\n",
" Or a vector of TRUE/FALSE, of length n=", n
)
} else {
# convert integers to logical
uncertainty <- 1:n %in% uncertainty
}
}
# if a single value, repeat for all models
if (is.logical(uncertainty) & length(uncertainty) == 1) {
uncertainty <- rep(uncertainty, n)
}
# if all that hasn't yet made it length n, then stop
if (length(uncertainty) != n) {
stop(
"'uncertainty' as TRUE/FALSE should have length 1 or n.\n",
" length(uncertainty) = ", length(uncertainty)
)
}
# some feedback about uncertainty settings
if (all(uncertainty)) {
message("showing uncertainty for all models")
}
if (!any(uncertainty)) {
message("not showing uncertainty for any models")
}
if (any(uncertainty) & !all(uncertainty)) {
message(
"showing uncertainty for model",
ifelse(sum(uncertainty) > 1, "s: ", " "),
paste(which(uncertainty), collapse = ",")
)
}
for (i in 1:n) {
if (all(is.na(quantsSD[, i]) | quantsSD[, i] == 0)) {
message("No uncertainty available for model ", i)
uncertainty[i] <- FALSE
}
}
#### no longer dividing by 2 for single-sex models
if (length(unique(nsexes)) > 1) {
warning(
"SSplotComparisons no longer divides SpawnBio by 2 for single-sex models\n",
"to get female-only spawning biomass output by SS for a single-sex model,\n",
"use the new Nsexes = -1 option in the data file."
)
}
# check number of models to be plotted
if (models[1] == "all") {
models <- 1:n
}
nlines <- length(models)
# check for mcmc
if (any(mcmcVec) & length(mcmc) == 0) {
mcmcVec <- FALSE
warning("Setting mcmcVec = FALSE because summaryoutput[['mcmc']] is empty")
}
# check length of mcmcVec
if (nlines > 1 & length(mcmcVec) == 1) {
mcmcVec <- rep(mcmcVec, nlines)
}
if (nlines != length(mcmcVec)) {
stop("Input 'mcmcVec' must equal 1 or the number of models.\n")
}
# if index plots are requested, do some checks on inputs
if (any(subplots %in% 13:14) & !is.null(indices) && nrow(indices) > 0) {
# check indexfleets
if (is.null(indexfleets)) {
# if indexfleets is NULL
indexfleets <- list()
for (imodel in 1:n) {
indexfleets[[paste0("model", imodel)]] <-
sort(unique(indices[["Fleet"]][indices[["imodel"]] == imodel]))
}
} else {
# if indexfleets is provided
if (!is.null(indexfleets)) {
# if a single number is provided, then repeat it n times
if (is.vector(indexfleets) & length(indexfleets) == 1) {
indexfleets <- rep(indexfleets, n)
}
if (length(indexfleets) != n) {
warning(
"Skipping index plots: length(indexfleets) should be 1 or n = ",
n, "."
)
indexfleets <- NULL
}
}
}
# check for mismatched lengths of list elements
if (!length(unique(lapply(indexfleets, FUN = length))) == 1) {
warning(
"Skipping index plots;\n",
"Fleets have different numbers of indices listed in 'indexfleets'."
)
indexfleets <- NULL
}
# figure out suffix to add to index plots
index_plot_suffix <- rep("", length(indexfleets))
# if more than one index is compared, add suffix to filename
if (length(indexfleets[[1]]) > 1) {
for (iindex in 1:length(indexfleets[[1]])) {
fleets <- as.numeric(data.frame(indexfleets)[iindex, ])
if (length(unique(fleets)) == 1) {
index_plot_suffix[iindex] <- paste0("_flt", fleets[1])
} else {
index_plot_suffix[iindex] <- paste0("_index", iindex)
}
}
}
} # end check for index plots (subplots %in% 13:14)
# setup colors, points, and line types
if (is.null(col) & nlines > 3) col <- rich.colors.short(nlines + 1)[-1]
if (is.null(col) & nlines < 3) col <- rich.colors.short(nlines)
if (is.null(col) & nlines == 3) col <- c("blue", "red", "green3")
if (is.null(shadecol)) {
# new approach thanks to Trevor Branch
shadecol <- adjustcolor(col, alpha.f = shadealpha)
}
# set pch values if no input
if (is.null(pch)) {
pch <- rep(1:25, 10)[1:nlines]
}
# if line stuff is shorter than number of lines, recycle as needed
if (length(col) < nlines) col <- rep(col, nlines)[1:nlines]
if (length(pch) < nlines) pch <- rep(pch, nlines)[1:nlines]
if (length(lty) < nlines) lty <- rep(lty, nlines)[1:nlines]
if (length(lwd) < nlines) lwd <- rep(lwd, nlines)[1:nlines]
if (!is.expression(legendlabels[1]) && is.null(legendlabels)) {
legendlabels <- paste("model", 1:nlines)
}
# open new window if requested
if (plot & new & !pdf) {
dev.new(
width = pwidth,
height = pheight,
pointsize = ptsize,
record = TRUE
)
par(par)
}
# get MCMC results if requested
  # For each model flagged in 'mcmcVec', replace the MLE-based time series
  # (SpawnBio, Bratio, SPRratio, recruits, recdevs and their intervals) with
  # quantiles computed across the MCMC posterior draws for that model.
  # Matching between time-series rows and MCMC columns is done by the
  # "Label" column (e.g. "SSB_2020"), so unmatched rows remain NA.
  for (iline in (1:nlines)[mcmcVec]) {
    imodel <- models[iline]
    # reset values to NA for mcmc columns only
    cols <- imodel
    SpawnBioLower[, cols] <- SpawnBioUpper[, cols] <- SpawnBio[, cols] <- NA
    BratioLower[, cols] <- BratioUpper[, cols] <- Bratio[, cols] <- NA
    SPRratioLower[, cols] <- SPRratioUpper[, cols] <- SPRratio[, cols] <- NA
    recruitsLower[, cols] <- recruitsUpper[, cols] <- recruits[, cols] <- NA
    recdevsLower[, cols] <- recdevsUpper[, cols] <- recdevs[, cols] <- NA
    ### get MCMC for SpawnBio
    tmp <- grep("SSB", names(mcmc[[imodel]])) # try it to see what you get
    # exclude columns that aren't part of the timeseries
    # (reference-point quantities like SSB_unfished, SSB_MSY, etc.)
    tmp2 <- c(
      grep("SSB_unfished", names(mcmc[[imodel]]), ignore.case = TRUE),
      grep("SSB_Btgt", names(mcmc[[imodel]]), ignore.case = TRUE),
      grep("SSB_SPRtgt", names(mcmc[[imodel]]), ignore.case = TRUE),
      grep("SSB_MSY", names(mcmc[[imodel]]), ignore.case = TRUE)
    )
    tmp <- setdiff(tmp, tmp2)
    if (length(tmp) > 0) { # there are some mcmc values to use
      # subset of columns from MCMC for this model
      mcmc.tmp <- mcmc[[imodel]][, tmp]
      mcmclabs <- names(mcmc.tmp)
      # per-year median and chosen interval quantiles across posterior draws
      lower <- apply(mcmc.tmp, 2, quantile, prob = lowerCI, na.rm = TRUE)
      med <- apply(mcmc.tmp, 2, quantile, prob = 0.5, na.rm = TRUE)
      upper <- apply(mcmc.tmp, 2, quantile, prob = upperCI, na.rm = TRUE)
      SpawnBio[, imodel] <- med[match(SpawnBio[["Label"]], mcmclabs)]
      SpawnBioLower[, imodel] <- lower[match(SpawnBioLower[["Label"]], mcmclabs)]
      SpawnBioUpper[, imodel] <- upper[match(SpawnBioUpper[["Label"]], mcmclabs)]
    }
    ### get MCMC for Bratio
    tmp <- grep("Bratio", names(mcmc[[imodel]])) # try it to see what you get
    if (length(tmp) > 0) { # there are some mcmc values to use
      # subset of columns from MCMC for this model
      mcmc.tmp <- mcmc[[imodel]][, tmp]
      mcmclabs <- names(mcmc.tmp)
      lower <- apply(mcmc.tmp, 2, quantile, prob = lowerCI, na.rm = TRUE)
      med <- apply(mcmc.tmp, 2, quantile, prob = 0.5, na.rm = TRUE)
      upper <- apply(mcmc.tmp, 2, quantile, prob = upperCI, na.rm = TRUE)
      Bratio[, imodel] <- med[match(Bratio[["Label"]], mcmclabs)]
      BratioLower[, imodel] <- lower[match(BratioLower[["Label"]], mcmclabs)]
      BratioUpper[, imodel] <- upper[match(BratioUpper[["Label"]], mcmclabs)]
    }
    ### get MCMC for SPRratio
    # try it to see what you get
    tmp <- grep("SPRratio", names(mcmc[[imodel]]))
    if (length(tmp) > 0) { # there are some mcmc values to use
      # subset of columns from MCMC for this model
      mcmc.tmp <- mcmc[[imodel]][, tmp]
      mcmclabs <- names(mcmc.tmp)
      lower <- apply(mcmc.tmp, 2, quantile, prob = lowerCI, na.rm = TRUE)
      med <- apply(mcmc.tmp, 2, quantile, prob = 0.5, na.rm = TRUE)
      upper <- apply(mcmc.tmp, 2, quantile, prob = upperCI, na.rm = TRUE)
      SPRratio[, imodel] <- med[match(SPRratio[["Label"]], mcmclabs)]
      SPRratioLower[, imodel] <- lower[match(SPRratioLower[["Label"]], mcmclabs)]
      SPRratioUpper[, imodel] <- upper[match(SPRratioUpper[["Label"]], mcmclabs)]
    }
    ### get MCMC for recruits
    tmp <- grep("^Recr_", names(mcmc[[imodel]])) # try it to see what you get
    # exclude columns that aren't part of the timeseries
    tmp2 <- grep("Recr_unfished", names(mcmc[[imodel]]), ignore.case = TRUE)
    tmp <- setdiff(tmp, tmp2)
    if (length(tmp) > 0) { # there are some mcmc values to use
      # subset of columns from MCMC for this model
      mcmc.tmp <- mcmc[[imodel]][, tmp]
      mcmclabs <- names(mcmc.tmp)
      lower <- apply(mcmc.tmp, 2, quantile, prob = lowerCI, na.rm = TRUE)
      med <- apply(mcmc.tmp, 2, quantile, prob = 0.5, na.rm = TRUE)
      # mean recruitment should be more comparable
      mean <- apply(mcmc.tmp, 2, mean, na.rm = TRUE)
      upper <- apply(mcmc.tmp, 2, quantile, prob = upperCI, na.rm = TRUE)
      # NOTE(review): meanRecWarning is initialized to TRUE near the top of
      # SSplotComparisons(), so this condition is never TRUE and the message
      # below is never shown; the flag was presumably meant to start as
      # FALSE -- confirm intended behavior
      if (!meanRecWarning) {
        message(
          "note: using mean recruitment from MCMC instead of median,\n",
          "because it is more comparable to MLE\n"
        )
        meanRecWarning <- TRUE
      }
      recruits[, imodel] <- mean[match(recruits[["Label"]], mcmclabs)]
      recruitsLower[, imodel] <- lower[match(recruitsLower[["Label"]], mcmclabs)]
      recruitsUpper[, imodel] <- upper[match(recruitsUpper[["Label"]], mcmclabs)]
    }
    ### get MCMC for recdevs
    # get values from mcmc to replace
    # (main-period devs, early/initial-age devs, and forecast devs)
    tmp <- unique(c(
      grep("_RecrDev_", names(mcmc[[imodel]])),
      grep("_InitAge_", names(mcmc[[imodel]])),
      grep("ForeRecr_", names(mcmc[[imodel]]))
    ))
    if (length(tmp) > 0) { # there are some mcmc values to use
      # subset of columns from MCMC for this model
      mcmc.tmp <- mcmc[[imodel]][, tmp]
      mcmclabs <- names(mcmc.tmp)
      lower <- apply(mcmc.tmp, 2, quantile, prob = lowerCI, na.rm = TRUE)
      med <- apply(mcmc.tmp, 2, quantile, prob = 0.5, na.rm = TRUE)
      upper <- apply(mcmc.tmp, 2, quantile, prob = upperCI, na.rm = TRUE)
      recdevs[, imodel] <- med[match(recdevs[["Label"]], mcmclabs)]
      recdevsLower[, imodel] <- lower[match(recdevsLower[["Label"]], mcmclabs)]
      recdevsUpper[, imodel] <- upper[match(recdevsUpper[["Label"]], mcmclabs)]
    }
  }
if (is.null(endyrvec)) {
endyrvec <- endyrs + 1
}
if (length(endyrvec) == 1) {
endyrvec <- rep(endyrvec, nlines)
}
# not sure why there should be NA values for Yr column in recdevs,
# but old code to eliminate the devs past endyr wasn't working as
# configured before
recdevs <- recdevs[!is.na(recdevs[["Yr"]]), ]
recdevsLower <- recdevsLower[!is.na(recdevsLower[["Yr"]]), ]
recdevsUpper <- recdevsUpper[!is.na(recdevsUpper[["Yr"]]), ]
# change to NA any values beyond endyr
if (!is.null(endyrvec)) {
for (iline in 1:nlines) {
endyr <- endyrvec[iline]
imodel <- models[iline]
SpawnBio[SpawnBio[["Yr"]] > endyr, imodel] <- NA
SpawnBioLower[SpawnBio[["Yr"]] > endyr, imodel] <- NA
SpawnBioUpper[SpawnBio[["Yr"]] > endyr, imodel] <- NA
Bratio[Bratio[["Yr"]] > endyr, imodel] <- NA
BratioLower[Bratio[["Yr"]] > endyr, imodel] <- NA
BratioUpper[Bratio[["Yr"]] > endyr, imodel] <- NA
#### note: add generalized startyrvec option in the future
## if(exists("startyrvec")){
## startyr <- startyrvec[iline]
## Bratio[Bratio[["Yr"]] < startyr, imodel] <- NA
## BratioLower[Bratio[["Yr"]] < startyr, imodel] <- NA
## BratioUpper[Bratio[["Yr"]] < startyr, imodel] <- NA
## }
SPRratio[SPRratio[["Yr"]] >= endyr, imodel] <- NA
SPRratioLower[SPRratio[["Yr"]] >= endyr, imodel] <- NA
SPRratioUpper[SPRratio[["Yr"]] >= endyr, imodel] <- NA
Fvalue[Fvalue[["Yr"]] >= endyr, imodel] <- NA
FvalueLower[Fvalue[["Yr"]] >= endyr, imodel] <- NA
FvalueUpper[Fvalue[["Yr"]] >= endyr, imodel] <- NA
recruits[recruits[["Yr"]] > endyr, imodel] <- NA
recruitsLower[recruits[["Yr"]] > endyr, imodel] <- NA
recruitsUpper[recruits[["Yr"]] > endyr, imodel] <- NA
if (!is.null(recdevs)) {
recdevs[recdevs[["Yr"]] > endyr, imodel] <- NA
recdevsLower[recdevs[["Yr"]] > endyr, imodel] <- NA
recdevsUpper[recdevs[["Yr"]] > endyr, imodel] <- NA
}
}
}
# function to add shaded uncertainty intervals behind line
# requires the existence of the TRUE/FALSE vector "uncertainty"
addpoly <- function(yrvec, lower, upper) {
lower[lower < 0] <- 0 # max of value or 0
for (iline in (1:nlines)[uncertainty]) {
imodel <- models[iline]
good <- !is.na(lower[, imodel]) & !is.na(upper[, imodel])
polygon(
x = c(yrvec[good], rev(yrvec[good])),
y = c(lower[good, imodel], rev(upper[good, imodel])),
border = NA, col = shadecol[iline]
)
# lines(yrvec[good],lower[good,imodel],lty=3,col=col[iline])
# lines(yrvec[good],upper[good,imodel],lty=3,col=col[iline])
}
}
## equ <- -(1:2) # IGT 2020/3/12: this variable seems to not be used
# function to plot spawning biomass
  # Plot spawning biomass (or spawning output) time series for all models.
  #
  # Adds shaded uncertainty intervals, per-model points/lines, equilibrium
  # values (rows 1-2 of SpawnBio) as offset points with interval arrows,
  # axes, optional shading over forecast years, and a legend. Relies on
  # objects from the enclosing SSplotComparisons() environment (the
  # SpawnBio* data frames, color/line/point controls, etc.).
  #
  # show_uncertainty: show intervals; forced FALSE when no model has
  #   uncertainty available.
  # Returns the upper y-axis limit (collected into ymax_vec by the caller).
  plotSpawnBio <- function(show_uncertainty = TRUE) {
    # only show uncertainty if values are present for at least one model
    if (!any(uncertainty)) {
      show_uncertainty <- FALSE
    }
    # get axis limits
    if (is.null(xlim)) {
      if (show_equilibrium) {
        xlim <- range(SpawnBio[["Yr"]])
      } else {
        # rows 1 and 2 hold the equilibrium values, so exclude them
        xlim <- range(SpawnBio[["Yr"]][-c(1, 2)])
      }
      if (!is.null(endyrvec) & all(endyrvec < max(xlim))) {
        xlim[2] <- max(endyrvec)
      }
    }
    ylim <- ylimAdj * range(0, SpawnBio[
      SpawnBio[["Yr"]] >= xlim[1] &
        SpawnBio[["Yr"]] <= xlim[2],
      models
    ], na.rm = TRUE)
    if (show_uncertainty) {
      # expand y-limits to include the upper bounds of the intervals
      ylim <- range(ylim, ylimAdj * SpawnBioUpper[
        SpawnBio[["Yr"]] >= xlim[1] &
          SpawnBio[["Yr"]] <= xlim[2],
        models[uncertainty]
      ], na.rm = TRUE)
    }
    # set units on spawning biomass plot
    if (length(unique(SpawnOutputUnits)) != 1) {
      warning(
        "Some models may have different units",
        " for spawning output than others"
      )
    }
    if (any(SpawnOutputUnits == "numbers")) {
      ylab <- labels[12] # numbers
    } else {
      ylab <- labels[2] # biomass
    }
    # do some scaling of y-axis
    # (yunits divides the axis labels; ylab is adjusted to match)
    yunits <- 1
    if (rescale & ylim[2] > 1e3 & ylim[2] < 1e6) {
      yunits <- 1e3
      ylab <- gsub("(t)", "(x1000 t)", ylab, fixed = TRUE)
      ylab <- gsub("eggs", "x1000 eggs", ylab, fixed = TRUE)
    }
    if (rescale & ylim[2] > 1e6) {
      yunits <- 1e6
      ylab <- gsub("(t)", "(million t)", ylab, fixed = TRUE)
      ylab <- gsub("eggs", "millions of eggs", ylab, fixed = TRUE)
    }
    if (rescale & ylim[2] > 1e9) {
      yunits <- 1e9
      ylab <- gsub("million", "billion", ylab, fixed = TRUE)
    }
    if (!add) {
      # start an empty plot; axes are added further below
      plot(0,
        type = "n", xlim = xlim, ylim = ylim, xlab = labels[1], ylab = ylab,
        xaxs = xaxs, yaxs = yaxs, axes = FALSE
      )
    }
    if (show_uncertainty) {
      # add shading for uncertainty
      addpoly(
        yrvec = SpawnBio[["Yr"]][-(1:2)], lower = SpawnBioLower[-(1:2), ],
        upper = SpawnBioUpper[-(1:2), ]
      )
      # equilibrium spawning biomass year by model
      # (staggered left of the first year so per-model arrows don't overlap)
      xEqu <- SpawnBio[["Yr"]][2] - (1:nlines) / nlines
    } else {
      # equilibrium spawning biomass year by model
      xEqu <- rep(SpawnBio[["Yr"]][2], nlines)
    }
    # draw points and lines
    if (spacepoints %in% c(0, 1, FALSE)) { # don't spread out points
      matplot(SpawnBio[["Yr"]][-(1:2)], SpawnBio[-(1:2), models],
        col = col, pch = pch, lty = lty, lwd = lwd, type = type,
        ylim = ylim, add = TRUE
      )
    } else {
      # spread out points with interval equal to spacepoints and
      # staggering equal to staggerpoints
      matplot(SpawnBio[["Yr"]][-(1:2)], SpawnBio[-(1:2), models],
        col = col, lty = lty, lwd = lwd, type = "l", ylim = ylim, add = TRUE
      )
      # blank out all but every spacepoints-th year (offset per model)
      SpawnBio2 <- SpawnBio
      for (iline in 1:nlines) {
        imodel <- models[iline]
        SpawnBio2[
          (SpawnBio2[["Yr"]] - initpoint) %% spacepoints !=
            (staggerpoints * iline) %% spacepoints,
          imodel
        ] <- NA
      }
      matplot(SpawnBio2[["Yr"]][-(1:2)], SpawnBio2[-(1:2), models],
        col = col, pch = pch, lwd = lwd, type = "p", ylim = ylim, add = TRUE
      )
    }
    if (show_equilibrium) {
      ## add arrows for equilibrium values
      old_warn <- options()$warn # previous setting
      options(warn = -1) # turn off "zero-length arrow" warning
      if (show_uncertainty) {
        arrows(
          x0 = xEqu[models[uncertainty]],
          y0 = as.numeric(SpawnBioLower[1, models[uncertainty]]),
          x1 = xEqu[models[uncertainty]],
          y1 = as.numeric(SpawnBioUpper[1, models[uncertainty]]),
          length = 0.01, angle = 90, code = 3, col = col[uncertainty],
          lwd = 2
        )
      }
      options(warn = old_warn) # returning to old value
      ## add points at equilibrium values
      points(
        x = xEqu, SpawnBio[1, models], col = col, pch = pch,
        cex = 1.2, lwd = lwd
      )
    }
    # add axes
    if (!add) {
      abline(h = 0, col = "grey")
      if (tickEndYr) { # include ending year in axis labels
        # default tick positions if axis(1) were run
        ticks <- graphics::axTicks(1)
        # make axis (excluding anything after the max ending year)
        axis(1, at = c(ticks[ticks < max(endyrvec)], max(endyrvec)))
      } else {
        # nothing special (may include labels beyond the ending year)
        axis(1)
      }
      # add shaded area over forecast years if more than 1 forecast year shown
      if (!is.null(endyrvec) &
        max(endyrvec) > 1 + max(endyrs) &
        shadeForecast) {
        rect(
          xleft = max(endyrs) + 1, ybottom = par()$usr[3],
          xright = par()$usr[2], ytop = par()$usr[4],
          col = gray(0, alpha = 0.1), border = NA
        )
      }
      # y-axis labels are divided by yunits (e.g. thousands or millions)
      yticks <- pretty(ylim)
      axis(2, at = yticks, labels = format(yticks / yunits), las = 1)
      box()
    }
    if (legend) {
      # add legend if requested
      add_legend(legendlabels,
        legendloc = legendloc,
        legendorder = legendorder,
        legendncol = legendncol,
        col = col,
        pch = pch,
        lwd = lwd,
        lty = lty
      )
    }
    # return upper y-limit
    return(ylim[2])
  }
# function to plot biomass ratio (may be identical to previous plot)
  # Plot the relative spawning biomass ("Bratio") time series for all
  # models, with optional uncertainty intervals and horizontal reference
  # lines at btarg and minbthresh (drawn only when those shared values
  # are positive; they are set to -999 when models disagree).
  #
  # show_uncertainty: show intervals; forced FALSE when no model has
  #   uncertainty available.
  # Returns the upper y-axis limit (collected into ymax_vec by the caller).
  plotBratio <- function(show_uncertainty = TRUE) {
    # only show uncertainty if values are present for at least one model
    if (!any(uncertainty)) {
      show_uncertainty <- FALSE
    }
    # get axis limits
    if (is.null(xlim)) {
      xlim <- range(Bratio[["Yr"]])
      if (!is.null(endyrvec) & all(endyrvec < max(xlim))) {
        xlim[2] <- max(endyrvec)
      }
    }
    ylim <- ylimAdj * range(0, Bratio[
      Bratio[["Yr"]] >= xlim[1] &
        Bratio[["Yr"]] <= xlim[2],
      models
    ], na.rm = TRUE)
    if (show_uncertainty) {
      # expand y-limits to cover interval upper bounds
      # (divide out ylimAdj first so it isn't applied twice)
      ylim <- ylimAdj * range(ylim / ylimAdj, BratioUpper[
        Bratio[["Yr"]] >= xlim[1] &
          Bratio[["Yr"]] <= xlim[2],
        models[uncertainty]
      ], na.rm = TRUE)
    }
    # make plot
    if (!add) {
      plot(0,
        type = "n", xlim = xlim, ylim = ylim,
        xlab = labels[1], ylab = labels[3],
        xaxs = xaxs, yaxs = yaxs, axes = FALSE
      )
    }
    if (show_uncertainty) {
      addpoly(Bratio[["Yr"]], lower = BratioLower, upper = BratioUpper)
    }
    if (spacepoints %in% c(0, 1, FALSE)) { # don't spread out points
      matplot(Bratio[["Yr"]], Bratio[, models],
        col = col, pch = pch,
        lty = lty, lwd = lwd, type = type, ylim = ylim, add = TRUE
      )
    } else {
      # spread out points with interval equal to spacepoints and
      # staggering equal to staggerpoints
      matplot(Bratio[["Yr"]], Bratio[, models],
        col = col, pch = pch,
        lty = lty, lwd = lwd, type = "l", ylim = ylim, add = TRUE
      )
      if (type != "l") {
        # blank out all but every spacepoints-th year (offset per model)
        Bratio2 <- Bratio
        for (iline in 1:nlines) {
          imodel <- models[iline]
          Bratio2[(Bratio2[["Yr"]] - initpoint) %% spacepoints !=
            (staggerpoints * iline) %% spacepoints, imodel] <- NA
        }
        matplot(Bratio2[["Yr"]], Bratio2[, models],
          col = col, pch = pch,
          lty = lty, lwd = lwd, type = "p", ylim = ylim, add = TRUE
        )
      }
    }
    yticks <- pretty(par()$yaxp[1:2])
    if (btarg > 0) {
      # dashed line and label at the management target
      abline(h = btarg, col = "red", lty = 2)
      text(min(Bratio[["Yr"]]) + 4, btarg + 0.03, labels[10], adj = 0)
      yticks <- sort(c(btarg, yticks))
    }
    if (minbthresh > 0) {
      # dashed line and label at the minimum stock size threshold
      abline(h = minbthresh, col = "red", lty = 2)
      text(min(Bratio[["Yr"]]) + 4, minbthresh + 0.03, labels[11], adj = 0)
      yticks <- sort(c(minbthresh, yticks))
    }
    if (!add) {
      abline(h = 0, col = "grey")
      abline(h = 1, col = "grey", lty = 2)
      if (tickEndYr) { # include ending year in axis labels
        # default tick positions if axis(1) were run
        ticks <- graphics::axTicks(1)
        # make axis (excluding anything after the max ending year)
        axis(1, at = c(ticks[ticks < max(endyrvec)], max(endyrvec)))
      } else {
        # nothing special (may include labels beyond the ending year)
        axis(1)
      }
      # add shaded area over forecast years if more than 1 forecast year shown
      if (!is.null(endyrvec) &
        max(endyrvec) > 1 + max(endyrs) &
        shadeForecast) {
        rect(
          xleft = max(endyrs) + 1, ybottom = par()$usr[3],
          xright = par()$usr[2], ytop = par()$usr[4],
          col = gray(0, alpha = 0.1), border = NA
        )
      }
      axis(2, at = yticks, las = 1)
      box()
    }
    if (legend) {
      # add legend if requested
      add_legend(legendlabels,
        legendloc = legendloc,
        legendorder = legendorder,
        legendncol = legendncol,
        col = col,
        pch = pch,
        lwd = lwd,
        lty = lty
      )
    }
    # return upper y-limit
    return(ylim[2])
  }
  # Plot the SPR-related time series for all models. The quantity plotted
  # is whatever the models report (raw "1-SPR" or an SPR ratio such as
  # "(1-SPR)/(1-SPR_X%)", per SPRratioLabel).
  #
  # When the reported quantity is the "(1-SPR)/(1-SPR_X%)" ratio, a second
  # axis on the right shows the corresponding 1-SPR values, which requires
  # a temporarily widened right margin (restored before returning).
  #
  # show_uncertainty: show intervals; forced FALSE when no model has
  #   uncertainty available.
  # Returns the upper y-axis limit (collected into ymax_vec by the caller).
  plotSPRratio <- function(show_uncertainty = TRUE) {
    # plot SPR quantity (may be ratio or raw value)
    # only show uncertainty if values are present for at least one model
    if (!any(uncertainty)) {
      show_uncertainty <- FALSE
    }
    # get axis limits
    if (is.null(xlim)) {
      xlim <- range(SPRratio[["Yr"]])
      if (!is.null(endyrvec) & all(endyrvec < max(xlim))) {
        xlim[2] <- max(endyrvec)
      }
    }
    ylim <- ylimAdj * range(0, SPRratio[
      SPRratio[["Yr"]] >= xlim[1] &
        SPRratio[["Yr"]] <= xlim[2],
      models
    ], na.rm = TRUE)
    if (show_uncertainty) {
      # expand y-limits to cover interval upper bounds
      # (divide out ylimAdj first so it isn't applied twice)
      ylim <- ylimAdj * range(ylim / ylimAdj,
        SPRratioUpper[
          SPRratio[["Yr"]] >= xlim[1] &
            SPRratio[["Yr"]] <= xlim[2],
          models[uncertainty]
        ],
        na.rm = TRUE
      )
    }
    par(par)
    # make plot
    if (!add) {
      if (isTRUE(!is.na(SPRratioLabel) &&
        SPRratioLabel ==
          paste0("(1-SPR)/(1-SPR_", floor(100 * sprtarg), "%)"))) {
        # add to right-hand outer margin to make space
        # for second vertical axis
        # store current margin parameters
        # save old margins
        newmar <- oldmar <- par()$mar
        # match right-hand margin value to left-hand value
        newmar[4] <- newmar[2]
        # update graphics parameters
        par(mar = newmar)
      }
      plot(0,
        type = "n", xlim = xlim, ylim = ylim, xlab = labels[1],
        ylab = "", xaxs = xaxs, yaxs = yaxs, las = 1, axes = FALSE
      )
      axis(2)
    }
    if (show_uncertainty) {
      addpoly(SPRratio[["Yr"]], lower = SPRratioLower, upper = SPRratioUpper)
    }
    if (spacepoints %in% c(0, 1, FALSE)) { # don't spread out points
      matplot(SPRratio[["Yr"]], SPRratio[, models],
        col = col, pch = pch,
        lty = lty, lwd = lwd, type = type, ylim = ylim, add = TRUE
      )
    } else {
      # spread out points with interval equal to spacepoints and
      # staggering equal to staggerpoints
      matplot(SPRratio[["Yr"]], SPRratio[, models],
        col = col, pch = pch,
        lty = lty, lwd = lwd, type = "l", ylim = ylim, add = TRUE
      )
      if (type != "l") {
        # blank out all but every spacepoints-th year (offset per model)
        SPRratio2 <- SPRratio
        for (iline in 1:nlines) {
          imodel <- models[iline]
          SPRratio2[(SPRratio2[["Yr"]] - initpoint) %% spacepoints !=
            (staggerpoints * iline) %% spacepoints, imodel] <- NA
        }
        matplot(SPRratio2[["Yr"]], SPRratio2[, models],
          col = col, pch = pch,
          lty = lty, lwd = lwd, type = "p", ylim = ylim, add = TRUE
        )
      }
    }
    abline(h = 0, col = "grey")
    if (sprtarg > 0) {
      if (isTRUE(SPRratioLabel == "1-SPR")) {
        # if starter file chooses raw SPR as the option for reporting,
        # don't show ratio
        abline(h = sprtarg, col = "red", lty = 2)
        text(SPRratio[["Yr"]][1] + 4, (sprtarg + 0.03), labels[10],
          adj = 0
        )
        mtext(
          side = 2, text = SPRratioLabel,
          line = par()$mgp[1], col = par()$col.lab, cex = par()$cex.lab
        )
      } else {
        # draw line at sprtarg
        yticks <- pretty(ylim)
        if (isTRUE(!is.na(SPRratioLabel) &&
          SPRratioLabel == paste0(
            "(1-SPR)/(1-SPR_",
            floor(100 * sprtarg), "%)"
          ))) {
          # add right-hand vertical axis showing 1-SPR
          abline(h = 1, col = "red", lty = 2)
          text(SPRratio[["Yr"]][1] + 4, 1 + 0.03, labels[10], adj = 0)
          axis(4, at = yticks, labels = yticks * (1 - sprtarg), las = 1)
          mtext(
            side = 4, text = "1 - SPR",
            line = par()$mgp[1], col = par()$col.lab, cex = par()$cex.lab
          )
          # line below has round to be more accurate
          # than the floor which is used
          # in the test above and in SS
          mtext(
            side = 2, text = paste("(1-SPR)/(1-SPR_", 100 * sprtarg, "%)", sep = ""),
            line = par()$mgp[1], col = par()$col.lab, cex = par()$cex.lab
          )
        } else {
          # unrecognized SPR reporting option: skip the reference line
          message(
            "No line added to SPR ratio plot, ",
            "as the settings used in this model ",
            "have not yet been configured in SSplotComparisons."
          )
          mtext(
            side = 2, text = SPRratioLabel,
            line = par()$mgp[1], col = par()$col.lab, cex = par()$cex.lab
          )
        }
      }
    } else {
      # no target: just label the axis with whatever quantity is reported
      mtext(
        side = 2, text = SPRratioLabel,
        line = par()$mgp[1], col = par()$col.lab, cex = par()$cex.lab
      )
    }
    if (!add) {
      if (tickEndYr) { # include ending year in axis labels
        # default tick positions if axis(1) were run
        ticks <- graphics::axTicks(1)
        # make axis (excluding anything after the max ending year)
        axis(1, at = c(ticks[ticks < max(endyrvec)], max(endyrvec)))
      } else {
        # nothing special (may include labels beyond the ending year)
        axis(1)
      }
      # add shaded area over forecast years if more than 1 forecast year shown
      if (!is.null(endyrvec) &
        max(endyrvec) > 1 + max(endyrs) &
        shadeForecast) {
        rect(
          xleft = max(endyrs) + 1, ybottom = par()$usr[3],
          xright = par()$usr[2], ytop = par()$usr[4],
          col = gray(0, alpha = 0.1), border = NA
        )
      }
    }
    if (legend) {
      # add legend if requested
      add_legend(legendlabels,
        legendloc = legendloc,
        legendorder = legendorder,
        legendncol = legendncol,
        col = col,
        pch = pch,
        lwd = lwd,
        lty = lty
      )
    }
    box()
    if (exists("oldmar")) {
      # restore old margin parameters
      par(mar = oldmar)
    }
    # return upper y-limit
    return(ylim[2])
  }
  #### fishing mortality (however it is specified in the models)
  # Plot the time series of fishing intensity (F, harvest rate, or whatever
  # units each model reports) for all models being compared on one panel.
  # Relies on variables from the enclosing function's environment:
  # Fvalue / FvalueLower / FvalueUpper (data frames with a "Yr" column plus
  # one column per model), models, uncertainty, col/pch/lty/lwd, labels, etc.
  #
  # show_uncertainty: if TRUE (and at least one model has uncertainty
  #   estimates) shade the interval between FvalueLower and FvalueUpper.
  # Returns the upper y-axis limit so the caller can record/harmonize scales.
  plotF <- function(show_uncertainty = TRUE) {
    # only show uncertainty if values are present for at least one model
    if (!any(uncertainty)) {
      show_uncertainty <- FALSE
    }
    # get axis limits (xlim may be provided by the enclosing function;
    # otherwise span the years present, capped at the max ending year)
    if (is.null(xlim)) {
      xlim <- range(Fvalue[["Yr"]])
      if (!is.null(endyrvec) & all(endyrvec < max(xlim))) {
        xlim[2] <- max(endyrvec)
      }
    }
    # y-limits from point estimates within the x-range, scaled by ylimAdj
    ylim <- ylimAdj * range(0, Fvalue[
      Fvalue[["Yr"]] >= xlim[1] &
        Fvalue[["Yr"]] <= xlim[2],
      models
    ], na.rm = TRUE)
    if (show_uncertainty) {
      # expand y-limits to cover the upper uncertainty bounds;
      # ylim / ylimAdj undoes the scaling before re-applying it
      ylim <- ylimAdj * range(ylim / ylimAdj,
        FvalueUpper[
          Fvalue[["Yr"]] >= xlim[1] &
            Fvalue[["Yr"]] <= xlim[2],
          models[uncertainty]
        ],
        na.rm = TRUE
      )
    }
    par(par)
    # make plot
    if (!add) {
      plot(0,
        type = "n", xlim = xlim, ylim = ylim, xlab = labels[1],
        ylab = "", xaxs = xaxs, yaxs = yaxs, las = 1, axes = FALSE
      )
      if (tickEndYr) { # include ending year in axis labels
        # default tick positions if axis(1) were run
        ticks <- graphics::axTicks(1)
        # make axis (excluding anything after the max ending year)
        axis(1, at = c(ticks[ticks < max(endyrvec)], max(endyrvec)))
      } else {
        # nothing special (may include labels beyond the ending year)
        axis(1)
      }
      axis(2)
    }
    if (show_uncertainty) {
      # shaded polygons between lower and upper bounds (closure helper)
      addpoly(Fvalue[["Yr"]], lower = FvalueLower, upper = FvalueUpper)
    }
    if (spacepoints %in% c(0, 1, FALSE)) { # don't spread out points
      matplot(Fvalue[["Yr"]], Fvalue[, models],
        col = col, pch = pch,
        lty = lty, lwd = lwd, type = type, ylim = ylim, add = TRUE
      )
    } else {
      # spread out points with interval equal to spacepoints and
      # staggering equal to staggerpoints
      matplot(Fvalue[["Yr"]], Fvalue[, models],
        col = col, pch = pch,
        lty = lty, lwd = lwd, type = "l", ylim = ylim, add = TRUE
      )
      if (type != "l") {
        # blank out all but every spacepoints-th year (offset per model)
        # so overlapping point symbols remain distinguishable
        Fvalue2 <- Fvalue
        for (iline in 1:nlines) {
          imodel <- models[iline]
          Fvalue2[Fvalue2[["Yr"]] %% spacepoints !=
            (staggerpoints * iline) %% spacepoints, imodel] <- NA
        }
        matplot(Fvalue2[["Yr"]], Fvalue2[, models],
          col = col, pch = pch,
          lty = lty, lwd = lwd, type = "p", ylim = ylim, add = TRUE
        )
      }
    }
    abline(h = 0, col = "grey")
    # y-axis label drawn separately so it respects current par settings
    mtext(
      side = 2, text = FvalueLabel,
      line = par()$mgp[1], col = par()$col.lab, cex = par()$cex.lab
    )
    box()
    if (legend) {
      # add legend if requested
      add_legend(legendlabels,
        legendloc = legendloc,
        legendorder = legendorder,
        legendncol = legendncol,
        col = col,
        pch = pch,
        lwd = lwd,
        lty = lty
      )
    }
    # return upper y-limit
    return(ylim[2])
  }
  # Plot recruitment time series for all models, with optional uncertainty
  # intervals and optional equilibrium-recruitment points. Uses closure
  # variables: recruits / recruitsLower / recruitsUpper (data frames with a
  # "Yr" column, first two rows being equilibrium/initial values), models,
  # uncertainty, show_equilibrium, col/pch/lty/lwd, labels, etc.
  #
  # show_uncertainty: draw vertical interval arrows when TRUE.
  # recruit_lines: currently unused within this function body —
  #   NOTE(review): confirm whether it was intended to toggle line drawing.
  # Returns the upper y-axis limit.
  plotRecruits <- function(show_uncertainty = TRUE, recruit_lines = TRUE) {
    # plot recruitment
    # only show uncertainty if values are present for at least one model
    if (!any(uncertainty)) {
      show_uncertainty <- FALSE
    }
    # determine x-limits (rows 1-2 hold equilibrium/initial values and are
    # excluded from the range when equilibrium is not shown)
    if (is.null(xlim)) {
      if (show_equilibrium) {
        xlim <- range(recruits[["Yr"]])
      } else {
        xlim <- range(recruits[["Yr"]][-c(1, 2)])
      }
      if (!is.null(endyrvec) & all(endyrvec < max(xlim))) {
        xlim[2] <- max(endyrvec)
      }
    }
    # determine y-limits
    ylim <- ylimAdj * range(0, recruits[
      recruits[["Yr"]] >= xlim[1] &
        recruits[["Yr"]] <= xlim[2],
      models
    ], na.rm = TRUE)
    if (show_uncertainty) {
      # expand to cover upper uncertainty bounds
      ylim <- ylimAdj * range(ylim / ylimAdj,
        recruitsUpper[
          recruits[["Yr"]] >= xlim[1] &
            recruits[["Yr"]] <= xlim[2],
          models[uncertainty]
        ],
        na.rm = TRUE
      )
    }
    # do some automatic scaling of the units
    # (recruits assumed to be reported in 1,000s per labels[4])
    ylab <- labels[4]
    yunits <- 1
    if (ylim[2] > 1e3 & ylim[2] < 1e6) {
      # if max recruits is between a million and a billion (1,000s units)
      yunits <- 1e3
      ylab <- gsub("1,000s", "millions", ylab)
    }
    if (ylim[2] > 1e6) {
      # if max is greater than a billion (e.g. pacific hake)
      yunits <- 1e6
      ylab <- gsub("1,000s", "billions", ylab)
    }
    # plot lines showing recruitment
    if (spacepoints %in% c(0, 1, FALSE)) { # don't spread out points
      matplot(recruits[["Yr"]][-(1:2)], recruits[-(1:2), models],
        col = col, pch = pch, lty = lty, lwd = lwd, type = type,
        xlim = xlim, ylim = ylim,
        xlab = labels[1], ylab = ylab, xaxs = xaxs, yaxs = yaxs,
        axes = FALSE, add = add
      )
    } else {
      # spread out points with interval equal to spacepoints and
      # staggering equal to staggerpoints
      matplot(recruits[["Yr"]][-(1:2)], recruits[-(1:2), models],
        col = col, pch = pch, lty = lty, lwd = lwd, type = "l",
        xlim = xlim, ylim = ylim,
        xlab = labels[1], ylab = ylab, xaxs = xaxs, yaxs = yaxs,
        axes = FALSE, add = add
      )
      if (type != "l") {
        # thin out point symbols per model to avoid overplotting
        recruits2 <- recruits
        for (iline in 1:nlines) {
          imodel <- models[iline]
          recruits2[(recruits2[["Yr"]] %% spacepoints - initpoint) !=
            (staggerpoints * iline) %% spacepoints, imodel] <- NA
        }
        matplot(recruits2[["Yr"]][-(1:2)], recruits2[-(1:2), models],
          col = col, pch = pch, lty = lty, lwd = lwd, type = "p",
          xlab = labels[1], ylab = ylab, xaxs = xaxs, yaxs = yaxs,
          axes = FALSE, ylim = ylim, add = TRUE
        )
      }
    }
    ## Add points at equilibrium values. Note: I adapted this logic from the
    ## SSB plot above.
    if (show_uncertainty) {
      # offset equilibrium points slightly left of the first year, one
      # position per model, so their interval arrows don't overlap
      xEqu <- recruits[["Yr"]][2] - (1:nlines) / nlines
    } else {
      xEqu <- rep(recruits[["Yr"]][1], nlines)
    }
    if (show_equilibrium) {
      points(
        x = xEqu, y = recruits[1, models], col = col, pch = pch,
        cex = 1.2, lwd = lwd
      )
    }
    # add uncertainty intervals when requested
    if (show_uncertainty) {
      for (iline in 1:nlines) {
        imodel <- models[iline]
        # NOTE(review): col[imodel] and xEqu[imodel] below are indexed by
        # model number while other subplot functions use the line index
        # (col[iline]); verify these agree when models != 1:nlines.
        if (uncertainty[imodel]) {
          ## plot all but equilibrium values
          xvec <- recruits[["Yr"]]
          if (nlines > 1) xvec <- xvec + 0.4 * iline / nlines - 0.2
          old_warn <- options()$warn # previous setting
          options(warn = -1) # turn off "zero-length arrow" warning
          # arrows (-2 in vectors below is to remove initial year recruitment)
          arrows(
            x0 = xvec[-c(1, 2)],
            y0 = pmax(as.numeric(recruitsLower[-c(1, 2), imodel]), 0),
            x1 = xvec[-c(1, 2)],
            y1 = as.numeric(recruitsUpper[-c(1, 2), imodel]),
            length = 0.01, angle = 90, code = 3, col = col[imodel]
          )
          options(warn = old_warn) # returning to old value
          if (show_equilibrium) {
            arrows(
              x0 = xEqu[imodel],
              y0 = pmax(as.numeric(recruitsLower[1, imodel]), 0),
              x1 = xEqu[imodel],
              y1 = as.numeric(recruitsUpper[1, imodel]),
              length = 0.01, angle = 90, code = 3, col = col[imodel]
            )
          }
        }
      }
    }
    abline(h = 0, col = "grey")
    if (legend) {
      # add legend if requested
      add_legend(legendlabels,
        legendloc = legendloc,
        legendorder = legendorder,
        legendncol = legendncol,
        col = col,
        pch = pch,
        lwd = lwd,
        lty = lty
      )
    }
    if (!add) {
      if (tickEndYr) { # include ending year in axis labels
        # default tick positions if axis(1) were run
        ticks <- graphics::axTicks(1)
        # make axis (excluding anything after the max ending year)
        axis(1, at = c(ticks[ticks < max(endyrvec)], max(endyrvec)))
      } else {
        # nothing special (may include labels beyond the ending year)
        axis(1)
      }
      # add shaded area over forecast years if more than 1 forecast year shown
      if (!is.null(endyrvec) &
        max(endyrvec) > 1 + max(endyrs) &
        shadeForecast) {
        rect(
          xleft = max(endyrs) + 1, ybottom = par()$usr[3],
          xright = par()$usr[2], ytop = par()$usr[4],
          col = gray(0, alpha = 0.1), border = NA
        )
      }
      # y-axis labels rescaled by the units chosen above
      yticks <- pretty(ylim)
      axis(2, at = yticks, labels = format(yticks / yunits), las = 1)
      box()
    }
    # return upper y-limit
    return(ylim[2])
  }
  # Plot recruitment deviations (log-scale deviations from the stock-recruit
  # curve) as points for each model, with optional interval arrows.
  # Uses closure variables: recdevs / recdevsLower / recdevsUpper (data frames
  # with a "Yr" column plus one column per model), models, uncertainty, etc.
  # Returns the upper y-axis limit (or NA when intervals are unavailable).
  plotRecDevs <- function(show_uncertainty = TRUE) { # plot recruit deviations
    # test for bad values
    if (any(is.na(recdevs[["Yr"]]))) {
      warning("Recdevs associated with initial age structure may not be shown")
    }
    # only show uncertainty if values are present for at least one model
    if (!any(uncertainty)) {
      show_uncertainty <- FALSE
    }
    # empty plot
    if (is.null(xlim)) {
      xlim <- range(recdevs[["Yr"]], na.rm = TRUE)
      if (!is.null(endyrvec) & all(endyrvec < max(xlim))) {
        xlim[2] <- max(endyrvec)
      }
    }
    ylim <- ylimAdj * range(recdevs[
      recdevs[["Yr"]] >= xlim[1] &
        recdevs[["Yr"]] <= xlim[2],
      models
    ], na.rm = TRUE)
    # all-NA devs produce an infinite range; bail out rather than plot
    if (any(is.infinite(ylim))) {
      warning(
        "Skipping recdev plots. Infinite ylim may indicate ",
        'all values are NA in summaryoutput[["recdevs"]]'
      )
      return(ylim[2])
    }
    if (show_uncertainty) {
      if (all(is.na(recdevsLower[, models]))) {
        # can't do uncertainty if no range present
        return(invisible(NA))
      }
      # widen y-limits to cover both lower and upper interval bounds
      ylim <- ylimAdj * range(recdevsLower[
        recdevs[["Yr"]] >= xlim[1] &
          recdevs[["Yr"]] <= xlim[2],
        models
      ],
      recdevsUpper[
        recdevs[["Yr"]] >= xlim[1] &
          recdevs[["Yr"]] <= xlim[2],
        models
      ],
      na.rm = TRUE
      )
    }
    ylim <- range(-ylim, ylim) # make symmetric
    if (!add) {
      plot(0,
        xlim = xlim, ylim = ylim, axes = FALSE,
        type = "n", xlab = labels[1], ylab = labels[5], xaxs = xaxs,
        yaxs = yaxs, las = 1
      )
      axis(2, las = 1)
      abline(h = 0, col = "grey")
    }
    if (show_uncertainty) {
      # vertical interval arrows, horizontally staggered per model
      for (iline in 1:nlines) {
        imodel <- models[iline]
        if (uncertainty[imodel]) {
          xvec <- recdevs[["Yr"]]
          if (nlines > 1) xvec <- xvec + 0.4 * iline / nlines - 0.2
          arrows(
            x0 = xvec, y0 = as.numeric(recdevsLower[, imodel]),
            x1 = xvec, y1 = as.numeric(recdevsUpper[, imodel]),
            length = 0.01, angle = 90, code = 3, col = col[iline]
          )
        }
      }
    }
    # loop over vector of models to add points (no staggering applied here)
    for (iline in 1:nlines) {
      imodel <- models[iline]
      yvec <- recdevs[, imodel]
      xvec <- recdevs[["Yr"]]
      points(xvec, yvec, pch = pch[iline], lwd = lwd[iline], col = col[iline])
    }
    if (!add) {
      if (tickEndYr) { # include ending year in axis labels
        # default tick positions if axis(1) were run
        ticks <- graphics::axTicks(1)
        # make axis (excluding anything after the max ending year)
        axis(1, at = c(ticks[ticks < max(endyrvec)], max(endyrvec)))
      } else {
        # nothing special (may include labels beyond the ending year)
        axis(1)
      }
      # add shaded area over forecast years if more than 1 forecast year shown
      if (!is.null(endyrvec) &
        max(endyrvec) > 1 + max(endyrs) &
        shadeForecast) {
        rect(
          xleft = max(endyrs) + 1, ybottom = par()$usr[3],
          xright = par()$usr[2], ytop = par()$usr[4],
          col = gray(0, alpha = 0.1), border = NA
        )
      }
      box()
    }
    if (legend) {
      # add legend if requested
      add_legend(legendlabels,
        legendloc = legendloc,
        legendorder = legendorder,
        legendncol = legendncol,
        col = col,
        pch = pch,
        lwd = lwd,
        lty = lty
      )
    }
    # return upper y-limit
    return(ylim[2])
  }
## xmax <- 1.1*max(reldep)
## ymax <- 1.1*max(1,relspr[!is.na(relspr)])
## ylab <- managementratiolabels[1,2]
## phasefunc <- function(){
## if(!add) plot(reldep,relspr,xlab="B/Btarget",
## xlim=c(0,xmax),ylim=c(0,ymax),ylab=ylab,type="n")
## lines(reldep,relspr,type="o",col=col2)
## abline(h=0,col="grey")
## abline(v=0,col="grey")
## lines(reldep,relspr,type="o",col=col2)
## points(reldep[length(reldep)],relspr[length(relspr)],col=col4,pch=19)
## abline(h=1,col=col4,lty=2)
## abline(v=1,col=col4,lty=2)}
plotPhase <- function(show_uncertainty = TRUE) {
# plot biomass ratio vs. SPRratio
# only show uncertainty if values are present for at least one model
if (!any(uncertainty)) {
show_uncertainty <- FALSE
}
# get axis limits
xlim <- range(0, ylimAdj * Bratio[, models], na.rm = TRUE)
ylim <- range(0, ylimAdj * SPRratio[, models], na.rm = TRUE)
# make plot
if (!add) {
plot(0,
type = "n", xlim = xlim, ylim = ylim, xlab = labels[3],
ylab = SPRratioLabel, xaxs = xaxs, yaxs = yaxs, las = 1
)
}
goodyrs <- intersect(Bratio[["Yr"]], SPRratio[["Yr"]])
lastyr <- max(goodyrs)
for (iline in 1:nlines) {
imodel <- models[iline]
# no option get to stagger points in phase plots,
# only the last point is marked
xvals <- Bratio[Bratio[["Yr"]] %in% goodyrs, imodel]
yvals <- SPRratio[SPRratio[["Yr"]] %in% goodyrs, imodel]
lines(xvals,
yvals,
col = col[iline],
lty = lty[iline], lwd = lwd[iline],
type = "l"
) # no user control of type to add points
# NA values and missing points will occur if final year is different
points(tail(xvals, 1),
tail(yvals, 1),
col = col[iline],
pch = pch[iline], lwd = lwd[iline]
)
}
abline(h = 1, v = 1, col = "grey", lty = 2)
if (btarg > 0) abline(v = btarg, col = "red", lty = 2)
if (sprtarg > 0) abline(h = sprtarg, col = "red", lty = 2)
if (legend) {
# add legend if requested
add_legend(legendlabels,
legendloc = legendloc,
legendorder = legendorder,
legendncol = legendncol,
col = col,
pch = pch,
lwd = lwd,
lty = lty
)
}
# return upper y-limit
return(ylim[2])
}
plotIndices <- function(log = FALSE, iindex) {
# function to plot different fits to a single index of abundance
# get a subset of index table including only 1 index per model
# (hopefully matching each other)
indices2 <- NULL
for (iline in 1:nlines) {
imodel <- models[iline]
subset2 <- indices[["imodel"]] == imodel &
indices[["Yr"]] <= endyrvec[iline] &
indices[["Fleet"]] == indexfleets[[imodel]][iindex]
indices2 <- rbind(indices2, indices[subset2, ])
}
# get quantities for plot
yr <- indices2[["Yr"]]
obs <- indices2[["Obs"]]
exp <- indices2[["Exp"]]
imodel <- indices2[["imodel"]]
Q <- indices2[["Calc_Q"]]
if (log) {
obs <- log(obs)
exp <- log(exp)
ylab <- labels[7]
} else {
ylab <- labels[6]
}
# get uncertainty intervals if requested
if (indexUncertainty) {
if (indexPlotEach) {
if (is.null(indexSEvec)) {
indexSEvec <- indices2[["SE"]]
}
y <- obs
if (log) {
upper <- qnorm(.975, mean = y, sd = indexSEvec)
lower <- qnorm(.025, mean = y, sd = indexSEvec)
} else {
upper <- qlnorm(.975, meanlog = log(y), sdlog = indexSEvec)
lower <- qlnorm(.025, meanlog = log(y), sdlog = indexSEvec)
}
} else {
subset <- indices2[["imodel"]] == models[1]
if (is.null(indexSEvec)) {
indexSEvec <- indices2[["SE"]][subset]
}
y <- obs
if (log) {
upper <- qnorm(.975, mean = y, sd = indexSEvec)
lower <- qnorm(.025, mean = y, sd = indexSEvec)
} else {
upper <- qlnorm(.975, meanlog = log(y), sdlog = indexSEvec)
lower <- qlnorm(.025, meanlog = log(y), sdlog = indexSEvec)
}
}
} else {
upper <- NULL
lower <- NULL
}
### make plot of index fits
# calculate ylim (excluding dummy observations from observed but not expected)
sub <- !is.na(indices2[["Like"]])
ylim <- range(exp, obs[sub], lower[sub], upper[sub], na.rm = TRUE)
# if no values included in subset, then set ylim based on all values
if (!any(sub)) {
ylim <- range(exp, obs, lower, upper, na.rm = TRUE)
}
if (!log) {
# 0 included if not in log space
ylim <- c(0, ylimAdj * ylim[2])
} else {
# add padding on top and bottom
ylim <- ylim + c(-1, 1) * (ylimAdj - 1) * diff(ylim)
}
meanQ <- rep(NA, nlines)
if (!add) {
if (!is.null(endyrvec)) {
xlim <- c(min(yr), max(endyrvec))
} else {
xlim <- range(yr)
}
plot(0,
type = "n", xlim = xlim, yaxs = yaxs,
ylim = ylim, xlab = "Year", ylab = ylab, axes = FALSE
)
}
if (!log & yaxs != "i") {
abline(h = 0, col = "grey")
}
Qtext <- rep("(Q =", nlines)
for (iline in (1:nlines)[!mcmcVec]) {
imodel <- models[iline]
subset <- indices2[["imodel"]] == imodel
meanQ[iline] <- mean(Q[subset])
if (indexQlabel && any(Q[subset] != mean(Q[subset]))) {
Qtext[iline] <- "(mean Q ="
}
x <- yr[subset]
y <- exp[subset]
lines(x, y,
pch = pch[iline], lwd = lwd[iline],
lty = lty[iline], col = col[iline], type = type
)
}
legendlabels2 <- legendlabels
if (indexQlabel) {
legendlabels2 <- paste(
legendlabels, Qtext,
format(meanQ, digits = indexQdigits), ")"
)
}
if (legend) {
# add legend if requested
add_legend(legendlabels,
legendloc = legendloc,
legendorder = legendorder,
legendncol = legendncol,
col = col,
pch = pch,
lwd = lwd,
lty = lty
)
}
if (indexPlotEach) {
# plot observed values for each model (staggered slightly)
for (iline in (1:nlines)[!mcmcVec]) {
adj <- 0.2 * iline / nlines - 0.1
imodel <- models[iline]
if (any(is.na(indices2[["like"]]))) {
warning("NA's found in likelihood, may cause issues with index plots")
}
subset <- indices2[["imodel"]] == imodel & !is.na(indices2[["Like"]])
# add uncertainty intervals if requested
if (indexUncertainty) {
arrows(
x0 = yr[subset] + adj, y0 = lower[subset],
x1 = yr[subset] + adj, y1 = upper[subset],
length = 0.01, angle = 90, code = 3,
# colors have hard-wired alpha value of 0.7
col = adjustcolor(col, alpha.f = 0.7)[iline]
)
}
# add points on top of intervals
points(yr[subset] + adj, obs[subset],
pch = 21, cex = 1.5, col = 1,
bg = adjustcolor(col, alpha.f = 0.7)[iline]
)
}
} else {
# plot only the first model
imodel <- models[which(endyrvec == max(endyrvec))[1]]
subset <- indices2[["imodel"]] == imodel & !is.na(indices2[["Like"]])
# add uncertainty intervals if requested
if (indexUncertainty) {
arrows(
x0 = yr[subset], y0 = lower[subset],
x1 = yr[subset], y1 = upper[subset],
length = 0.01, angle = 90, code = 3, col = 1
)
}
# add points on top of intervals
points(yr[subset], obs[subset], pch = 16, cex = 1.5)
}
# if not added to existing plot then add axis labels and box
if (!add) {
xticks <- pretty(xlim)
axis(1, at = xticks, labels = format(xticks))
if (tickEndYr) {
axis(1, at = max(endyrvec))
}
axis(2)
box()
}
# return upper y-limit
return(ylim[2])
} # end plotIndices function
  # Density comparison plot for a single parameter or derived quantity.
  # For MLE models (mcmcVec = FALSE) a normal density is drawn from the point
  # estimate and its SD; for MCMC models a kernel density of the posterior
  # sample is drawn. With cumulative = TRUE the cumulative distribution is
  # plotted instead. Uses closure variables: pars/parsSD, quants/quantsSD,
  # mcmc, mcmcVec, models, nlines, col/shadecol/pch, density* settings, etc.
  #
  # parname: exact Label of the parameter or derived quantity to plot.
  # xlab: x-axis label.
  # denslwd: line width for the vertical line at the point estimate/median.
  # limit0: if TRUE, truncate the x-axis at zero.
  # cumulative: if TRUE, plot cumulative probability instead of density.
  # Returns NA (no control over ylim in these plots).
  plotDensities <- function(parname, xlab, denslwd, limit0 = TRUE,
                            cumulative = FALSE) {
    if (any(!mcmcVec)) {
      # look up the MLE estimate among parameters and derived quantities
      vals <- rbind(
        pars[pars[["Label"]] == parname, names(pars) != "recdev"],
        quants[quants[["Label"]] == parname, ]
      )
      if (nrow(vals) != 1) {
        warn <- paste("problem getting values for parameter:", parname, "")
        if (nrow(vals) == 0) {
          warn <- paste(
            warn,
            "no Labels match in either parameters or derived quantities"
          )
        }
        if (nrow(vals) > 0) {
          warn <- paste(
            warn,
            "Too many matching Labels:",
            pars[["Label"]][pars[["Label"]] == parname],
            quants[["Label"]][quants[["Label"]] == parname]
          )
        }
        warning(warn)
        # previous versions had an else statement,
        # but this will end the function here instead and saves indenting
        return(NULL)
      }
      valSDs <- rbind(
        parsSD[pars[["Label"]] == parname, ],
        quantsSD[quants[["Label"]] == parname, ]
      )
    }
    xmax <- xmin <- ymax <- NULL # placeholder for limits
    # placeholder for the mcmc density estimates, if there are any
    mcmcDens <- vector(mode = "list", length = nlines)
    # loop over models to set range
    good <- rep(TRUE, nlines) # indicator of which values to plot
    for (iline in 1:nlines) {
      imodel <- models[iline]
      if (mcmcVec[iline]) {
        # figure out which columns of posteriors to use
        mcmcColumn <- grep(parname, colnames(mcmc[[imodel]]), fixed = TRUE)
        # warn if it can't find the columns
        if (length(mcmcColumn) == 0) {
          message(
            "No columns selected from MCMC for '", parname,
            "' in model ", imodel
          )
          good[iline] <- FALSE
        }
        # warn if too many columns
        if (length(mcmcColumn) > 1) {
          warning(
            "Too many columns selected from MCMC for model ",
            imodel, ":", paste0(names(mcmc[[imodel]])[mcmcColumn],
              collapse = ", "
            ),
            ". Please specify a unique label in the mcmc dataframe",
            "or specify mcmcVec = FALSE for model ",
            imodel, " (or mcmcVec = FALSE applying to all models). "
          )
          good[iline] <- FALSE
        }
        # add density
        if (good[iline]) {
          mcmcVals <- mcmc[[imodel]][, mcmcColumn]
          # x-range from 0.5% and 99.5% quantiles of the posterior
          xmin <- min(xmin, quantile(mcmcVals, 0.005, na.rm = TRUE))
          if (limit0) {
            xmin <- max(0, xmin) # by default no plot can go below 0
          }
          if (fix0 & !grepl("R0", parname)) {
            xmin <- 0 # include 0 if requested (except for log(R0) plots)
          }
          xmax <- max(xmax, quantile(mcmcVals, 0.995, na.rm = TRUE))
          # density estimate of mcmc sample (posterior)
          z <- density(mcmcVals, cut = 0, adjust = densityadjust)
          # duplicate endpoints and anchor y at 0 on both sides
          z[["x"]] <- z[["x"]][c(1, 1:length(z[["x"]]), length(z[["x"]]))]
          # just to make sure that a good looking polygon is created
          z[["y"]] <- c(0, z[["y"]], 0)
          ymax <- max(ymax, max(z[["y"]])) # update ymax
          mcmcDens[[iline]] <- z # save density estimate for later plotting
        }
      } else {
        parval <- vals[1, imodel]
        parSD <- valSDs[1, imodel]
        if (!is.numeric(parval)) parval <- -1 # do this in case models added without the parameter
        if (!is.na(parSD) && parSD > 0) { # if non-zero SD available
          # update x range
          xmin <- min(xmin, qnorm(0.005, parval, parSD))
          if (limit0) xmin <- max(0, xmin) # by default no plot can go below 0
          if (fix0 & !grepl("R0", parname)) xmin <- 0 # include 0 if requested (except for log(R0) plots)
          xmax <- max(xmax, qnorm(0.995, parval, parSD))
          # calculate density to get y range
          x <- seq(xmin, xmax, length = 500)
          mle <- dnorm(x, parval, parSD)
          # rescale so the discretized density integrates to 1
          mlescale <- 1 / (sum(mle) * mean(diff(x)))
          mle <- mle * mlescale
          # update ymax
          ymax <- max(ymax, max(mle))
        } else { # if no SD, at least make sure interval includes MLE estimate
          xmin <- min(xmin, parval)
          xmax <- max(xmax, parval)
        }
      }
    }
    if (grepl("Bratio", parname)) {
      xmin <- 0 # xmin=0 for relative spawning biomass plots
    }
    if (limit0) {
      xmin <- max(0, xmin) # by default no plot can go below 0
    }
    if (fix0 & !grepl("R0", parname)) {
      xmin <- 0 # include 0 if requested (except for log(R0) plots)
    }
    # calculate x-limits and vector of values for densities
    xlim <- c(xmin, xmin + (xmax - xmin) * densityscalex)
    x <- seq(xmin, xmax, length = 500)
    # calculate some scaling stuff
    xunits <- 1
    if (rescale & xmax > 1e3 & xmax < 3e6) {
      xunits <- 1e3
      # xlab <- gsub("mt","x1000 mt",xlab)
      xlab2 <- "'1000 t"
    }
    if (rescale & xmax > 3e6) {
      xunits <- 1e6
      # xlab <- gsub("mt","million mt",xlab)
      xlab2 <- "million t"
    }
    # make empty plot
    if (is.null(ymax)) {
      # ymax is still NULL when no model produced a usable density
      message(
        " skipping plot of ", parname,
        " because it seems to not be estimated in any model"
      )
    } else {
      par(par)
      if (!add) {
        if (cumulative) {
          plot(0,
            type = "n", xlim = xlim, axes = FALSE, xaxs = "i", yaxs = yaxs,
            ylim = c(0, 1), xlab = xlab, ylab = ""
          )
        } else {
          plot(0,
            type = "n", xlim = xlim, axes = FALSE, xaxs = "i", yaxs = yaxs,
            ylim = c(0, 1.1 * ymax * densityscaley), xlab = xlab, ylab = ""
          )
        }
      }
      # add vertical lines for target and threshold
      # relative spawning biomass values
      if (grepl("Bratio", parname)) {
        if (btarg > 0) {
          abline(v = btarg, col = "red", lty = 2)
          text(btarg + 0.03, par()$usr[4], labels[10], adj = 1.05, srt = 90)
        }
        if (minbthresh > 0) {
          abline(v = minbthresh, col = "red", lty = 2)
          text(minbthresh + 0.03, par()$usr[4], labels[11],
            adj = 1.05, srt = 90
          )
        }
      }
      # quantiles at which point symbols are drawn (median in the middle)
      symbolsQuants <- c(0.025, 0.125, 0.25, 0.5, 0.75, 0.875, 0.975)
      # loop again to make plots
      for (iline in (1:nlines)[good]) {
        imodel <- models[iline]
        if (mcmcVec[iline]) {
          # make density for MCMC posterior
          mcmcColumn <- grep(parname, colnames(mcmc[[imodel]]), fixed = TRUE)
          mcmcVals <- mcmc[[imodel]][, mcmcColumn]
          # for symbols on plot
          x2 <- quantile(mcmcVals, symbolsQuants, na.rm = TRUE)
          # find the positions in the density closest to these quantiles
          x <- mcmcDens[[iline]][["x"]]
          if (!cumulative) {
            y <- mcmcDens[[iline]][["y"]]
            yscale <- 1 / (sum(y) * mean(diff(x)))
            y <- y * yscale
          } else {
            y <- cumsum(mcmcDens[[iline]][["y"]]) / sum(mcmcDens[[iline]][["y"]])
          }
          y2 <- NULL
          for (ii in x2) {
            # find y-value associated with closest matching x-value
            # "min" was added for rare case where two values are equally close
            y2 <- c(y2, min(y[abs(x - ii) == min(abs(x - ii))]))
          }
          # make shaded polygon
          if (!cumulative) {
            polygon(c(x[1], x, rev(x)[1]), c(0, y, 0),
              col = shadecol[iline],
              border = NA
            )
          } else {
            # polygon for cumulative has extra point in bottom right
            polygon(c(x[1], x, rev(x)[c(1, 1)]), c(0, y, 1, 0),
              col = shadecol[iline], border = NA
            )
          }
          # add thicker line
          lines(x, y, col = col[iline], lwd = 2)
          # add points on line and vertical line at median (hopefully)
          if (!cumulative) {
            if (densitysymbols) {
              points(x2, y2, col = col[iline], pch = pch[iline])
            }
            # really hokey and assumes that the middle value of
            # the vector of quantiles is the median
            lines(rep(x2[median(1:length(x2))], 2),
              c(0, y2[median(1:length(x2))]),
              col = col[iline]
            )
          } else {
            if (densitysymbols) {
              points(x2, symbolsQuants, col = col[iline], pch = pch[iline])
            }
            lines(rep(median(mcmcVals), 2), c(0, 0.5), col = col[iline])
          }
        } else {
          # make normal density for MLE
          parval <- vals[1, imodel]
          parSD <- valSDs[1, imodel]
          if (!is.na(parSD) && parSD > 0) {
            xmin <- min(xmin, qnorm(0.005, parval, parSD))
            if (limit0) {
              xmin <- max(0, xmin) # by default no plot can go below 0
            }
            if (fix0 & !grepl("R0", parname)) {
              xmin <- 0 # include 0 if requested (except for log(R0) plots)
            }
            # NOTE(review): max(xmax, xlim) mixes a scalar with the 2-vector
            # xlim; max() collapses them, but confirm this is intentional
            x <- seq(xmin, max(xmax, xlim), length = 500)
            # x2 <- parval+(-2:2)*parSD # 1 and 2 SDs away from mean to plot symbols
            x2 <- qnorm(symbolsQuants, parval, parSD)
            if (cumulative) {
              y <- mle <- pnorm(x, parval, parSD) # smooth line
              y2 <- mle2 <- pnorm(x2, parval, parSD) # symbols
            } else {
              mle <- dnorm(x, parval, parSD) # smooth line
              mle2 <- dnorm(x2, parval, parSD) # symbols
              mlescale <- 1 / (sum(mle) * mean(diff(x)))
              y <- mle <- mle * mlescale
              y2 <- mle2 <- mle2 * mlescale
            }
            # add shaded polygons
            polygon(c(x[1], x, rev(x)[1]), c(0, mle, 0),
              col = shadecol[iline], border = NA
            )
            lines(x, mle, col = col[iline], lwd = 2)
            if (!cumulative) {
              if (densitysymbols) {
                points(x2, mle2, col = col[iline], pch = pch[iline])
              }
              lines(rep(parval, 2),
                c(0, dnorm(parval, parval, parSD) * mlescale),
                col = col[iline], lwd = denslwd
              )
            } else {
              if (densitysymbols) {
                points(x2, symbolsQuants, col = col[iline], pch = pch[iline])
              }
              lines(rep(parval, 2),
                c(0, 0.5),
                col = col[iline], lwd = denslwd
              )
            }
          } else {
            # add vertical line for the estimate if no density can be added
            abline(v = parval, col = col[iline], lwd = denslwd)
          }
        }
        # should be able to move more stuff into this section
        # that applies to both MLE and MCMC
        if (densitytails & densitymiddle) {
          warning(
            "You are shading both tails and central 95% of density plots",
            "which is illogical"
          )
        }
        # shading is only possible when a density curve was drawn above
        doShade <- FALSE
        if (mcmcVec[iline]) {
          doShade <- TRUE
        } else {
          if (!is.na(parSD) && parSD > 0) {
            doShade <- TRUE
          }
        }
        if (densitytails & doShade) {
          # figure out which points are in the tails of the distributions
          x.lower <- x[x <= x2[1]]
          y.lower <- y[x <= x2[1]]
          x.upper <- x[x >= rev(x2)[1]]
          y.upper <- y[x >= rev(x2)[1]]
          # add darker shading for tails
          polygon(c(x.lower[1], x.lower, rev(x.lower)[1]),
            c(0, y.lower, 0),
            col = shadecol[iline], border = NA
          )
          polygon(c(x.upper[1], x.upper, rev(x.upper)[1]),
            c(0, y.upper, 0),
            col = shadecol[iline], border = NA
          )
        }
        if (densitymiddle & doShade) { # } & !is.na(parSD) && parSD>0){
          x.middle <- x[x >= x2[1] & x <= rev(x2)[1]]
          y.middle <- y[x >= x2[1] & x <= rev(x2)[1]]
          polygon(c(x.middle[1], x.middle, rev(x.middle)[1]),
            c(0, y.middle, 0),
            col = shadecol[iline], border = NA
          )
        }
      }
      # add axes and labels
      if (!add) {
        abline(h = 0, col = "grey")
        xticks <- pretty(xlim)
        axis(1, at = xticks, labels = format(xticks / xunits))
        theLine <- par()$mgp[1]
        if (cumulative) {
          axis(2,
            at = symbolsQuants, labels = format(symbolsQuants),
            cex.axis = 0.9
          )
          mtext(
            side = 2, line = theLine,
            text = "Cumulative Probability",
            col = par()$col.lab, cex = par()$cex.lab
          )
        } else {
          mtext(
            side = 2, line = theLine, text = labels[9],
            col = par()$col.lab, cex = par()$cex.lab
          )
        }
        box()
      }
      if (xunits != 1) {
        message(
          "x-axis for ", parname, " in density plot has been divided by ",
          xunits, " (so may be in units of ", xlab2, ")"
        )
      }
      # add legend
      if (legend) {
        add_legend(legendlabels,
          # override legend location for cumulative plots
          # where topleft should always work best
          legendloc = ifelse(cumulative, "topleft", legendloc),
          legendorder = legendorder,
          legendncol = legendncol,
          col = col,
          pch = pch,
          lwd = lwd,
          lty = lty
        )
      }
    }
    # in the future, this could return the upper y-limit,
    # currently there's no control over ylim in these plots
    return(NA)
  } # end plotDensities function
uncertaintyplots <- intersect(c(2, 4, 6, 8, 10, 12), subplots)
if (!any(uncertainty) & length(uncertaintyplots) > 0) {
# warn if uncertainty is off but uncertainty plots are requested
message(
"skipping plots with uncertainty:",
paste(uncertaintyplots, collapse = ",")
)
}
# subplot 1: spawning biomass
if (1 %in% subplots) {
if (verbose) {
message("subplot 1: spawning biomass")
}
if (plot) {
ymax_vec[1] <- plotSpawnBio(show_uncertainty = FALSE)
}
if (print) {
save_png_comparisons("compare1_spawnbio.png")
ymax_vec[1] <- plotSpawnBio(show_uncertainty = FALSE)
dev.off()
}
}
# subplot 2: spawning biomass with uncertainty intervals
if (2 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplot 2: spawning biomass with uncertainty intervals")
}
if (plot) {
ymax_vec[2] <- plotSpawnBio(show_uncertainty = TRUE)
}
if (print) {
save_png_comparisons("compare2_spawnbio_uncertainty.png")
ymax_vec[2] <- plotSpawnBio(show_uncertainty = TRUE)
dev.off()
}
}
}
# subplot 3: biomass ratio
# (hopefully equal to spawning relative spawning biomass)
if (3 %in% subplots) {
if (verbose) {
message("subplot 3: biomass ratio (hopefully equal to fraction of unfished)")
}
if (plot) {
ymax_vec[3] <- plotBratio(show_uncertainty = FALSE)
}
if (print) {
save_png_comparisons("compare3_Bratio.png")
ymax_vec[3] <- plotBratio(show_uncertainty = FALSE)
dev.off()
}
}
# subplot 4: biomass ratio with uncertainty
if (4 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplot 4: biomass ratio with uncertainty")
}
if (plot) {
ymax_vec[4] <- plotBratio(show_uncertainty = TRUE)
}
if (print) {
save_png_comparisons("compare4_Bratio_uncertainty.png")
ymax_vec[4] <- plotBratio(show_uncertainty = TRUE)
dev.off()
}
}
}
# subplot 5: SPR ratio
if (5 %in% subplots) {
if (verbose) {
message("subplot 5: SPR ratio")
}
if (plot) {
ymax_vec[5] <- plotSPRratio(show_uncertainty = FALSE)
}
if (print) {
save_png_comparisons("compare5_SPRratio.png")
ymax_vec[5] <- plotSPRratio(show_uncertainty = FALSE)
dev.off()
}
}
# subplot 6: SPR ratio with uncertainty
if (6 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplot 6: SPR ratio with uncertainty")
}
if (plot) {
ymax_vec[6] <- plotSPRratio(show_uncertainty = TRUE)
}
if (print) {
save_png_comparisons("compare6_SPRratio_uncertainty.png")
ymax_vec[6] <- plotSPRratio(show_uncertainty = TRUE)
dev.off()
}
}
}
# subplot 7: F (harvest rate or fishing mortality, however defined)
if (7 %in% subplots) {
if (verbose) {
message("subplot 7: F value")
}
if (plot) {
ymax_vec[7] <- plotF(show_uncertainty = FALSE)
}
if (print) {
save_png_comparisons("compare7_Fvalue.png")
ymax_vec[7] <- plotF(show_uncertainty = FALSE)
dev.off()
}
}
# subplot 8: F (harvest rate or fishing mortality, however defined)
# with uncertainty
if (8 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplot 8: F value with uncertainty")
}
if (plot) {
ymax_vec[8] <- plotF(show_uncertainty = TRUE)
}
if (print) {
save_png_comparisons("compare8_Fvalue_uncertainty.png")
ymax_vec[8] <- plotF(show_uncertainty = TRUE)
dev.off()
}
}
}
# subplot 9: recruits
if (9 %in% subplots) {
if (verbose) {
message("subplot 9: recruits")
}
if (plot) {
ymax_vec[9] <- plotRecruits(show_uncertainty = FALSE)
}
if (print) {
save_png_comparisons("compare9_recruits.png")
ymax_vec[9] <- plotRecruits(show_uncertainty = FALSE)
dev.off()
}
}
# subplot 10: recruits with uncertainty
if (10 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplot 10: recruits with uncertainty")
}
if (plot) {
ymax_vec[10] <- plotRecruits()
}
if (print) {
save_png_comparisons("compare10_recruits_uncertainty.png")
ymax_vec[10] <- plotRecruits()
dev.off()
}
}
}
# subplot 11: recruit devs
if (11 %in% subplots) {
if (verbose) message("subplot 11: recruit devs")
if (is.null(recdevs)) {
message("No recdevs present in the model summary, skipping plot.")
} else {
if (plot) {
ymax_vec[11] <- plotRecDevs(show_uncertainty = FALSE)
}
if (print) {
save_png_comparisons("compare11_recdevs.png")
ymax_vec[11] <- plotRecDevs(show_uncertainty = FALSE)
dev.off()
}
}
}
# subplot 12: recruit devs with uncertainty
if (12 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplot 12: recruit devs with uncertainty")
}
if (plot) {
ymax_vec[12] <- plotRecDevs()
}
if (print) {
save_png_comparisons("compare12_recdevs_uncertainty.png")
ymax_vec[12] <- plotRecDevs()
dev.off()
}
}
}
# subplot 13: index fits
if (13 %in% subplots & !is.null(indices) && nrow(indices) > 0) {
if (verbose) {
message("subplot 13: index fits")
}
for (iindex in 1:length(indexfleets[[1]])) {
if (plot) {
ymax_vec[13] <- plotIndices(log = FALSE, iindex = iindex)
}
if (print) {
save_png_comparisons(paste0(
"compare13_indices",
index_plot_suffix[iindex],
".png"
))
ymax_vec[13] <- plotIndices(log = FALSE, iindex = iindex)
dev.off()
}
} # end loop over indices to plot
} # end check for subplot 13
# subplot 14: index fits on a log scale
if (14 %in% subplots & !is.null(indices) && nrow(indices) > 0) {
if (verbose) {
message("subplot 14: index fits on a log scale")
}
for (iindex in 1:length(indexfleets[[1]])) {
if (plot) {
ymax_vec[14] <- plotIndices(log = TRUE, iindex = iindex)
}
if (print) {
save_png_comparisons(paste0(
"compare14_indices_log",
index_plot_suffix[iindex],
".png"
))
ymax_vec[14] <- plotIndices(log = TRUE, iindex = iindex)
dev.off()
}
} # end loop over indices to plot
} # end check for subplot 14
#### unfinished addition of phase plot comparisons
## # subplot 15: phase plot
if (15 %in% subplots) {
if (verbose) {
message("subplot 15: phase plot")
}
if (plot) {
ymax_vec[15] <- plotPhase()
}
if (print) {
save_png_comparisons("compare15_phase_plot.png")
ymax_vec[15] <- plotPhase()
dev.off()
}
}
# subplot 16 and 17: densities, and cumulative probability plots
if (16 %in% subplots | 17 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplots 16 and 17: densities")
}
# look for all parameters or derived quantities matching
# the input list of names
expandednames <- NULL
for (i in 1:length(densitynames)) {
matchingnames <- c(
pars[["Label"]],
quants[["Label"]]
)[grep(densitynames[i],
c(pars[["Label"]], quants[["Label"]]),
fixed = TRUE
)]
expandednames <- c(expandednames, matchingnames)
}
if (length(expandednames) == 0) {
warning("No parameter/quantity names matching 'densitynames' input.")
} else {
message(
"Parameter/quantity names matching 'densitynames' input:\n",
paste0(expandednames, collapse = ", ")
)
ndensities <- length(expandednames)
# make a table to store associated x-labels
densitytable <- data.frame(
name = expandednames,
label = expandednames,
stringsAsFactors = FALSE
)
if (!is.null(densityxlabs) && length(densityxlabs) == ndensities) {
densitytable[["label"]] <- densityxlabs
message(
" table of parameter/quantity labels with associated",
" x-axis label:"
)
print(densitytable)
} else {
if (!is.null(densityxlabs)) {
warning(
"length of 'densityxlabs' doesn't match the number of values ",
"matching 'densitynames' so parameter labels will be used instead"
)
}
}
        # loop over parameters for densities
if (16 %in% subplots) {
for (iplot in 1:ndensities) {
# find matching parameter
name <- densitytable[iplot, 1]
xlab <- densitytable[iplot, 2]
# if(verbose) message(" quantity name=",name,"\n",sep="")
if (plot) {
ymax_vec[16] <- plotDensities(
parname = name, xlab = xlab,
denslwd = densitylwd
)
}
if (print) {
save_png_comparisons(paste("compare16_densities_", name, ".png", sep = ""))
ymax_vec[16] <- plotDensities(
parname = name, xlab = xlab,
denslwd = densitylwd
)
dev.off()
}
}
}
# loop again for cumulative densities
if (17 %in% subplots) {
for (iplot in 1:ndensities) {
# find matching parameter
name <- densitytable[iplot, 1]
xlab <- densitytable[iplot, 2]
# if(verbose) message(" quantity name=",name,"\n",sep="")
if (plot) {
ymax_vec[17] <- plotDensities(
parname = name, xlab = xlab,
denslwd = densitylwd,
cumulative = TRUE
)
}
if (print) {
save_png_comparisons(paste("compare17_densities_", name, ".png", sep = ""))
ymax_vec[17] <- plotDensities(
parname = name, xlab = xlab,
denslwd = densitylwd,
cumulative = TRUE
)
dev.off()
}
}
}
}
}
}
#### unfinished addition of growth comparisons
## # subplot 19: growth, females
## if(19 %in% subplots){
## if(verbose) message("subplot 19: growth, females\n")
## if(plot) plotgrowth(sex='f')
## if(print){
## save_png_comparisons("compare19_growth_females.png")
## plotgrowth(sex='f')
## dev.off()
## }
## }
## # subplot 20: growth, males
## if(20 %in% subplots){
## if(verbose) message("subplot 20: growth, males\n")
## if(plot) plotgrowth(sex='m')
## if(print){
## save_png_comparisons("compare20_growth_males.png")
## plotgrowth(sex='m')
## dev.off()
## }
## }
if (pdf) dev.off()
return(invisible(ymax_vec))
}
| /R/SSplotComparisons.R | no_license | yukio-takeuchi/r4ss | R | false | false | 94,463 | r | #' plot model comparisons
#'
#' Creates a user-chosen set of plots comparing model output from a summary of
#' multiple models, where the collection was created using the
#' `SSsummarize` function.
#'
#'
#' @param summaryoutput List created by `SSsummarize`
#' @param subplots Vector of subplots to be created
#' Numbering of subplots is as follows:
#' \itemize{
#' \item 1 spawning biomass
#' \item 2 spawning biomass with uncertainty intervals
#' \item 3 biomass ratio (hopefully equal to fraction of unfished)
#' \item 4 biomass ratio with uncertainty
#' \item 5 SPR ratio
#' \item 6 SPR ratio with uncertainty
#' \item 7 F value
#' \item 8 F value with uncertainty
#' \item 9 recruits
#' \item 10 recruits with uncertainty
#' \item 11 recruit devs
#' \item 12 recruit devs with uncertainty
#' \item 13 index fits
#' \item 14 index fits on a log scale
#' \item 15 phase plot
#' \item 16 densities
#' \item 17 cumulative densities
#' }
#' @template plot
#' @template print
#' @param png Has same result as `print`, included for consistency with
#' `SS_plots`.
#' @param pdf Write output to PDF file? Can't be used in conjunction with
#' `png` or `print`.
#' @param models Optional subset of the models described in
#' `summaryoutput`. Either "all" or a vector of numbers indicating
#' columns in summary tables.
#' @param endyrvec Optional single year or vector of years representing the
#' final year of values to show for each model. By default it is set to the
#' ending year specified in each model.
#' @param indexfleets Fleet numbers for each model to compare
#' indices of abundance. Can take different forms:
#' \itemize{
#' \item NULL: (default) create a separate plot for each index as long as the fleet
#' numbering is the same across all models.
#' \item integer: create a single comparison plot for the chosen index
#' \item vector of length equal to number of models: a single fleet number
#' for each model to be compared in a single plot
#' \item list: list of fleet numbers associated with indices within each
#' model to be compared, where the list elements are each a vector of the
#' same length but the names of the list elements don't matter and can be
#' absent.
#' }
#' @param indexUncertainty Show uncertainty intervals on index data?
#'   Default=TRUE, but note that if models have any extra standard deviations
#'   added, these intervals may differ across models.
#' @param indexQlabel Add catchability to legend in plot of index fits
#' (TRUE/FALSE)?
#' @param indexQdigits Number of significant digits for catchability in legend
#' (if `indexQlabel = TRUE`)
#' @param indexSEvec Optional replacement for the SE values in
#' `summaryoutput[["indices"]]` to deal with the issue of differing uncertainty by
#' models described above.
#' @param indexPlotEach TRUE plots the observed index for each model with
#' colors, or FALSE just plots observed once in black dots.
#' @template labels
#' @param col Optional vector of colors to be used for lines. Input NULL
#' makes use of `rich.colors.short` function.
#' @param shadecol Optional vector of colors to be used for shading uncertainty
#' intervals. The default (NULL) is to use the same colors provided by
#' `col` (either the default or a user-chosen input) and make them
#' more transparent by applying the `shadealpha` input as an alpha
#' transparency value (using the `adjustcolor()` function)
#' @param pch Optional vector of plot character values
#' @param lty Optional vector of line types
#' @param lwd Optional vector of line widths
#' @param spacepoints Number of years between points shown on top of lines (for
#' long timeseries, points every year get mashed together)
#' @param staggerpoints Number of years to stagger the first point (if
#' `spacepoints > 1`) for each line (so that adjacent lines have points in
#' different years)
#' @param initpoint Year value for first point to be added to lines.
#' Points added to plots are those that satisfy
#' (Yr-initpoint)%%spacepoints == (staggerpoints*iline)%%spacepoints
#' @param tickEndYr TRUE/FALSE switch to turn on/off extra axis mark at final
#' year in timeseries plots.
#' @param shadeForecast TRUE/FALSE switch to turn on off shading of years beyond
#' the maximum ending year of the models
#' @param xlim Optional x limits
#' @param ylimAdj Multiplier for ylim parameter. Allows additional white space
#' to fit legend if necessary. Default=1.05.
#' @param xaxs Choice of xaxs parameter (see ?par for more info)
#' @param yaxs Choice of yaxs parameter (see ?par for more info)
#' @param type Type parameter passed to points (default 'o' overplots points on
#' top of lines)
#' @param uncertainty Show plots with uncertainty intervals? Either a single
#' TRUE/FALSE value, or a vector of TRUE/FALSE values for each model,
#' or a set of integers corresponding to the choice of models.
#' @param shadealpha Transparency adjustment used to make default shadecol
#' values (implemented as `adjustcolor(col=col, alpha.f=shadealpha)`)
#' @template legend
#' @param legendlabels Optional vector of labels to include in legend. Default
#' is 'model1','model2',etc.
#' @template legendloc
#' @param legendorder Optional vector of model numbers that can be used to have
#' the legend display the model names in an order that is different than that
#' which is represented in the summary input object.
#' @param legendncol Number of columns for the legend.
#' @param btarg Target biomass value at which to show a line (set to 0 to
#' remove)
#' @param minbthresh Minimum biomass threshold at which to show a line (set to
#' 0 to remove)
#' @param sprtarg Target value for SPR-ratio where line is drawn in the SPR
#' plots and phase plot.
#' @template pwidth
#' @template pheight
#' @template punits
#' @template res
#' @template ptsize
#' @template plotdir
#' @param filenameprefix Additional text to append to PNG or PDF file names.
#' It will be separated from default name by an underscore.
#' @param densitynames Vector of names (or subset of names) of parameters or
#' derived quantities contained in `summaryoutput[["pars"]][["Label"]]` or
#' `summaryoutput[["quants"]][["Label"]]` for which to make density plots
#' @param densityxlabs Optional vector of x-axis labels to use in the density
#' plots (must be equal in length to the printed vector of quantities that
#' match the `densitynames` input)
#' @param rescale TRUE/FALSE control of automatic rescaling of units into
#' thousands, millions, or billions
#' @param densityscalex Scalar for upper x-limit in density plots (values below
#' 1 will cut off the right tail to provide better contrast among narrower
#' distributions
#' @param densityscaley Scalar for upper y-limit in density plots (values below
#' 1 will cut off top of highest peaks to provide better contrast among broader
#' distributions
#' @param densityadjust Multiplier on bandwidth of kernel in density function
#' used for smoothing MCMC posteriors. See 'adjust' in ?density for details.
#' @param densitysymbols Add symbols along lines in density plots. Quantiles
#' are `c(0.025,0.1,0.25,0.5,0.75,0.9,0.975)`.
#' @param densitytails Shade tails outside of 95% interval darker in
#' density plots?
#' @param densitymiddle Shade middle inside of 95% interval darker in
#' density plots?
#' @param densitylwd Line width for density plots
#' @param fix0 Always include 0 in the density plots?
#' @param new Create new empty plot window
#' @param add Allows single plot to be added to existing figure. This needs to
#' be combined with specific 'subplots' input to make sure only one thing gets
#' added.
#' @param par list of graphics parameter values passed to the `par`
#' function
#' @template verbose
#' @param mcmcVec Vector of TRUE/FALSE values (or single value) indicating
#' whether input values are from MCMC or to use normal distribution around
#' MLE
#' @param show_equilibrium Whether to show the equilibrium values for
#' SSB. For some model comparisons, these might not be comparable and thus
#' useful to turn off. Defaults to TRUE.
#' @author Ian G. Taylor, John R. Wallace
#' @export
#' @seealso [SS_plots()], [SSsummarize()],
#' [SS_output()], [SSgetoutput()]
#' @examples
#' \dontrun{
#' # directories where models were run need to be defined
#' dir1 <- "c:/SS/mod1"
#' dir2 <- "c:/SS/mod2"
#'
#' # read two models
#' mod1 <- SS_output(dir = dir1)
#' mod2 <- SS_output(dir = dir2)
#'
#' # create list summarizing model results
#' mod.sum <- SSsummarize(list(mod1, mod2))
#'
#' # plot comparisons
#' SSplotComparisons(mod.sum, legendlabels = c("First model", "Second model"))
#'
#' # Example showing comparison of MLE to MCMC results where the mcmc would have
#' # been run in the subdirectory 'c:/SS/mod1/mcmc'
#' mod1 <- SS_output(dir = "c:/SS/mod1", dir.mcmc = "mcmc")
#' # pass the same model twice to SSsummarize in order to plot it twice
#' mod.sum <- SSsummarize(list(mod1, mod1))
#' # compare MLE to MCMC
#' SSplotComparisons(mod.sum,
#' legendlabels = c("MCMC", "MLE"),
#' mcmcVec = c(TRUE, FALSE)
#' )
#' }
#'
SSplotComparisons <-
function(summaryoutput, subplots = 1:20,
plot = TRUE, print = FALSE, png = print, pdf = FALSE,
models = "all",
endyrvec = NULL,
indexfleets = NULL,
indexUncertainty = TRUE,
indexQlabel = TRUE,
indexQdigits = 4,
indexSEvec = NULL,
# TRUE in following command plots the observed index for each model
# with colors, or FALSE just plots observed once in black dots
indexPlotEach = FALSE,
labels = c(
"Year", # 1
"Spawning biomass (t)", # 2
"Fraction of unfished", # 3
"Age-0 recruits (1,000s)", # 4
"Recruitment deviations", # 5
"Index", # 6
"Log index", # 7
"SPR-related quantity", # 8 automatically updated when consistent
"Density", # 9
"Management target", # 10
"Minimum stock size threshold", # 11
"Spawning output", # 12
"Harvest rate" # 13
),
col = NULL, shadecol = NULL,
pch = NULL, lty = 1, lwd = 2,
spacepoints = 10,
staggerpoints = 1,
initpoint = 0,
tickEndYr = TRUE,
shadeForecast = TRUE,
xlim = NULL, ylimAdj = 1.05,
xaxs = "i", yaxs = "i",
type = "o", uncertainty = TRUE, shadealpha = 0.1,
legend = TRUE, legendlabels = NULL, legendloc = "topright",
legendorder = NULL, legendncol = 1,
sprtarg = NULL, btarg = NULL, minbthresh = NULL,
pwidth = 6.5, pheight = 5.0, punits = "in", res = 300,
ptsize = 10,
plotdir = NULL,
filenameprefix = "",
densitynames = c("SSB_Virgin", "R0"),
densityxlabs = NULL,
rescale = TRUE,
densityscalex = 1,
densityscaley = 1,
densityadjust = 1,
densitysymbols = TRUE,
densitytails = TRUE,
densitymiddle = FALSE,
densitylwd = 1,
fix0 = TRUE,
new = TRUE,
add = FALSE,
par = list(mar = c(5, 4, 1, 1) + .1),
verbose = TRUE,
mcmcVec = FALSE,
show_equilibrium = TRUE) {
# switch to avoid repetition of warning about mean recruitment
meanRecWarning <- TRUE
ymax_vec <- rep(NA, 17) # vector of ymax values for each plot
# local version of save_png which doesn't relate to plotinfo and
# also adds control over 'filenameprefix' and 'par',
# (where the code is not following good practices and
# those arguments are not formally passed to the function)
save_png_comparisons <- function(file) {
# if extra text requested, add it before extention in file name
file <- paste0(filenameprefix, file)
# open png file
png(
filename = file.path(plotdir, file),
width = pwidth,
height = pheight,
units = punits,
res = res,
pointsize = ptsize
)
# change graphics parameters to input value
par(par)
}
if (png) {
print <- TRUE
}
if (png & is.null(plotdir)) {
stop("To print PNG files, you must supply a directory as 'plotdir'")
}
# check for internal consistency
if (pdf & png) {
stop("To use 'pdf', set 'print' or 'png' to FALSE.")
}
if (pdf) {
if (is.null(plotdir)) {
stop("To write to a PDF, you must supply a directory as 'plotdir'")
}
pdffile <- file.path(
plotdir,
paste0(
filenameprefix, "SSplotComparisons_",
format(Sys.time(), "%d-%b-%Y_%H.%M"), ".pdf"
)
)
pdf(file = pdffile, width = pwidth, height = pheight)
if (verbose) {
message("PDF file with plots will be:", pdffile)
}
par(par)
}
# subfunction to add legend
# legendfun <- function(legendlabels, cumulative = FALSE) {
# if (cumulative) {
# legendloc <- "topleft"
# }
# if (is.numeric(legendloc)) {
# Usr <- par()$usr
# legendloc <- list(
# x = Usr[1] + legendloc[1] * (Usr[2] - Usr[1]),
# y = Usr[3] + legendloc[2] * (Usr[4] - Usr[3])
# )
# }
#
# # if type input is "l" then turn off points on top of lines in legend
# legend.pch <- pch
# if (type == "l") {
# legend.pch <- rep(NA, length(pch))
# }
# legend(legendloc,
# legend = legendlabels[legendorder],
# col = col[legendorder],
# lty = lty[legendorder],
# seg.len = 2,
# lwd = lwd[legendorder],
# pch = legend.pch[legendorder],
# bty = "n",
# ncol = legendncol
# )
# }
# get stuff from summary output
n <- summaryoutput[["n"]]
nsexes <- summaryoutput[["nsexes"]]
startyrs <- summaryoutput[["startyrs"]]
endyrs <- summaryoutput[["endyrs"]]
pars <- summaryoutput[["pars"]]
parsSD <- summaryoutput[["parsSD"]]
parphases <- summaryoutput[["parphases"]]
quants <- summaryoutput[["quants"]]
quantsSD <- summaryoutput[["quantsSD"]]
SpawnBio <- summaryoutput[["SpawnBio"]]
SpawnBioLower <- summaryoutput[["SpawnBioLower"]]
SpawnBioUpper <- summaryoutput[["SpawnBioUpper"]]
Bratio <- summaryoutput[["Bratio"]]
BratioLower <- summaryoutput[["BratioLower"]]
BratioUpper <- summaryoutput[["BratioUpper"]]
SPRratio <- summaryoutput[["SPRratio"]]
SPRratioLower <- summaryoutput[["SPRratioLower"]]
SPRratioUpper <- summaryoutput[["SPRratioUpper"]]
Fvalue <- summaryoutput[["Fvalue"]]
FvalueLower <- summaryoutput[["FvalueLower"]]
FvalueUpper <- summaryoutput[["FvalueUpper"]]
recruits <- summaryoutput[["recruits"]]
recruitsLower <- summaryoutput[["recruitsLower"]]
recruitsUpper <- summaryoutput[["recruitsUpper"]]
recdevs <- summaryoutput[["recdevs"]]
recdevsLower <- summaryoutput[["recdevsLower"]]
recdevsUpper <- summaryoutput[["recdevsUpper"]]
indices <- summaryoutput[["indices"]]
# note that "mcmc" is a a list of dataframes,
# 1 for each model with mcmc output
mcmc <- summaryoutput[["mcmc"]]
lowerCI <- summaryoutput[["lowerCI"]]
upperCI <- summaryoutput[["upperCI"]]
SpawnOutputUnits <- summaryoutput[["SpawnOutputUnits"]]
btargs <- summaryoutput[["btargs"]]
minbthreshs <- summaryoutput[["minbthreshs"]]
sprtargs <- summaryoutput[["sprtargs"]]
SPRratioLabels <- summaryoutput[["SPRratioLabels"]]
FvalueLabels <- summaryoutput[["FvalueLabels"]]
# checking for the same reference points across models
if (is.null(btarg)) {
btarg <- unique(btargs)
if (length(btarg) > 1) {
warning("setting btarg = -999 because models don't have matching values")
btarg <- -999
}
}
if (is.null(minbthresh)) {
minbthresh <- unique(minbthreshs)
if (length(minbthresh) > 1) {
warning("setting minbthresh = -999 because models don't have matching values")
minbthresh <- -999
}
}
if (is.null(sprtarg)) {
sprtarg <- unique(sprtargs)
if (length(sprtarg) > 1) {
warning("setting sprtarg = -999 because models don't have matching values")
sprtarg <- -999
}
}
SPRratioLabel <- unique(SPRratioLabels)
if (length(SPRratioLabel) > 1) {
warning(
"setting label for SPR plot to 8th element of input 'labels' ",
"because the models don't have matching labels"
)
SPRratioLabel <- labels[8]
}
FvalueLabel <- unique(FvalueLabels)
if (length(FvalueLabel) > 1) {
warning(
"setting label for F plot to 13th element of input 'labels' ",
"because the models don't have matching labels"
)
FvalueLabel <- labels[13]
} else {
FvalueLabel <- gsub("_", " ", FvalueLabel)
}
### process input for which models have uncertainty shown
##
# if vector is numeric rather than logical, convert to logical
if (!is.logical(uncertainty) & is.numeric(uncertainty)) {
if (any(!uncertainty %in% 1:n)) {
# stop if numerical values aren't integers <= n
stop(
"'uncertainty' should be a subset of the integers\n",
" 1-", n, ", where n=", n, " is the number of models.\n",
" Or it can be a single TRUE/FALSE value.\n",
" Or a vector of TRUE/FALSE, of length n=", n
)
} else {
# convert integers to logical
uncertainty <- 1:n %in% uncertainty
}
}
# if a single value, repeat for all models
if (is.logical(uncertainty) & length(uncertainty) == 1) {
uncertainty <- rep(uncertainty, n)
}
# if all that hasn't yet made it length n, then stop
if (length(uncertainty) != n) {
stop(
"'uncertainty' as TRUE/FALSE should have length 1 or n.\n",
" length(uncertainty) = ", length(uncertainty)
)
}
# some feedback about uncertainty settings
if (all(uncertainty)) {
message("showing uncertainty for all models")
}
if (!any(uncertainty)) {
message("not showing uncertainty for any models")
}
if (any(uncertainty) & !all(uncertainty)) {
message(
"showing uncertainty for model",
ifelse(sum(uncertainty) > 1, "s: ", " "),
paste(which(uncertainty), collapse = ",")
)
}
for (i in 1:n) {
if (all(is.na(quantsSD[, i]) | quantsSD[, i] == 0)) {
message("No uncertainty available for model ", i)
uncertainty[i] <- FALSE
}
}
#### no longer dividing by 2 for single-sex models
if (length(unique(nsexes)) > 1) {
warning(
"SSplotComparisons no longer divides SpawnBio by 2 for single-sex models\n",
"to get female-only spawning biomass output by SS for a single-sex model,\n",
"use the new Nsexes = -1 option in the data file."
)
}
# check number of models to be plotted
if (models[1] == "all") {
models <- 1:n
}
nlines <- length(models)
# check for mcmc
if (any(mcmcVec) & length(mcmc) == 0) {
mcmcVec <- FALSE
warning("Setting mcmcVec = FALSE because summaryoutput[['mcmc']] is empty")
}
# check length of mcmcVec
if (nlines > 1 & length(mcmcVec) == 1) {
mcmcVec <- rep(mcmcVec, nlines)
}
if (nlines != length(mcmcVec)) {
stop("Input 'mcmcVec' must equal 1 or the number of models.\n")
}
# if index plots are requested, do some checks on inputs
if (any(subplots %in% 13:14) & !is.null(indices) && nrow(indices) > 0) {
# check indexfleets
if (is.null(indexfleets)) {
# if indexfleets is NULL
indexfleets <- list()
for (imodel in 1:n) {
indexfleets[[paste0("model", imodel)]] <-
sort(unique(indices[["Fleet"]][indices[["imodel"]] == imodel]))
}
} else {
# if indexfleets is provided
if (!is.null(indexfleets)) {
# if a single number is provided, then repeat it n times
if (is.vector(indexfleets) & length(indexfleets) == 1) {
indexfleets <- rep(indexfleets, n)
}
if (length(indexfleets) != n) {
warning(
"Skipping index plots: length(indexfleets) should be 1 or n = ",
n, "."
)
indexfleets <- NULL
}
}
}
# check for mismatched lengths of list elements
if (!length(unique(lapply(indexfleets, FUN = length))) == 1) {
warning(
"Skipping index plots;\n",
"Fleets have different numbers of indices listed in 'indexfleets'."
)
indexfleets <- NULL
}
# figure out suffix to add to index plots
index_plot_suffix <- rep("", length(indexfleets))
# if more than one index is compared, add suffix to filename
if (length(indexfleets[[1]]) > 1) {
for (iindex in 1:length(indexfleets[[1]])) {
fleets <- as.numeric(data.frame(indexfleets)[iindex, ])
if (length(unique(fleets)) == 1) {
index_plot_suffix[iindex] <- paste0("_flt", fleets[1])
} else {
index_plot_suffix[iindex] <- paste0("_index", iindex)
}
}
}
} # end check for index plots (subplots %in% 13:14)
# setup colors, points, and line types
if (is.null(col) & nlines > 3) col <- rich.colors.short(nlines + 1)[-1]
if (is.null(col) & nlines < 3) col <- rich.colors.short(nlines)
if (is.null(col) & nlines == 3) col <- c("blue", "red", "green3")
if (is.null(shadecol)) {
# new approach thanks to Trevor Branch
shadecol <- adjustcolor(col, alpha.f = shadealpha)
}
# set pch values if no input
if (is.null(pch)) {
pch <- rep(1:25, 10)[1:nlines]
}
# if line stuff is shorter than number of lines, recycle as needed
if (length(col) < nlines) col <- rep(col, nlines)[1:nlines]
if (length(pch) < nlines) pch <- rep(pch, nlines)[1:nlines]
if (length(lty) < nlines) lty <- rep(lty, nlines)[1:nlines]
if (length(lwd) < nlines) lwd <- rep(lwd, nlines)[1:nlines]
if (!is.expression(legendlabels[1]) && is.null(legendlabels)) {
legendlabels <- paste("model", 1:nlines)
}
# open new window if requested
if (plot & new & !pdf) {
dev.new(
width = pwidth,
height = pheight,
pointsize = ptsize,
record = TRUE
)
par(par)
}
# get MCMC results if requested
for (iline in (1:nlines)[mcmcVec]) {
imodel <- models[iline]
# reset values to NA for mcmc columns only
cols <- imodel
SpawnBioLower[, cols] <- SpawnBioUpper[, cols] <- SpawnBio[, cols] <- NA
BratioLower[, cols] <- BratioUpper[, cols] <- Bratio[, cols] <- NA
SPRratioLower[, cols] <- SPRratioUpper[, cols] <- SPRratio[, cols] <- NA
recruitsLower[, cols] <- recruitsUpper[, cols] <- recruits[, cols] <- NA
recdevsLower[, cols] <- recdevsUpper[, cols] <- recdevs[, cols] <- NA
### get MCMC for SpawnBio
tmp <- grep("SSB", names(mcmc[[imodel]])) # try it to see what you get
    # exclude rows that aren't part of the timeseries
tmp2 <- c(
grep("SSB_unfished", names(mcmc[[imodel]]), ignore.case = TRUE),
grep("SSB_Btgt", names(mcmc[[imodel]]), ignore.case = TRUE),
grep("SSB_SPRtgt", names(mcmc[[imodel]]), ignore.case = TRUE),
grep("SSB_MSY", names(mcmc[[imodel]]), ignore.case = TRUE)
)
tmp <- setdiff(tmp, tmp2)
if (length(tmp) > 0) { # there are some mcmc values to use
# subset of columns from MCMC for this model
mcmc.tmp <- mcmc[[imodel]][, tmp]
mcmclabs <- names(mcmc.tmp)
lower <- apply(mcmc.tmp, 2, quantile, prob = lowerCI, na.rm = TRUE)
med <- apply(mcmc.tmp, 2, quantile, prob = 0.5, na.rm = TRUE)
upper <- apply(mcmc.tmp, 2, quantile, prob = upperCI, na.rm = TRUE)
SpawnBio[, imodel] <- med[match(SpawnBio[["Label"]], mcmclabs)]
SpawnBioLower[, imodel] <- lower[match(SpawnBioLower[["Label"]], mcmclabs)]
SpawnBioUpper[, imodel] <- upper[match(SpawnBioUpper[["Label"]], mcmclabs)]
}
### get MCMC for Bratio
tmp <- grep("Bratio", names(mcmc[[imodel]])) # try it to see what you get
if (length(tmp) > 0) { # there are some mcmc values to use
# subset of columns from MCMC for this model
mcmc.tmp <- mcmc[[imodel]][, tmp]
mcmclabs <- names(mcmc.tmp)
lower <- apply(mcmc.tmp, 2, quantile, prob = lowerCI, na.rm = TRUE)
med <- apply(mcmc.tmp, 2, quantile, prob = 0.5, na.rm = TRUE)
upper <- apply(mcmc.tmp, 2, quantile, prob = upperCI, na.rm = TRUE)
Bratio[, imodel] <- med[match(Bratio[["Label"]], mcmclabs)]
BratioLower[, imodel] <- lower[match(BratioLower[["Label"]], mcmclabs)]
BratioUpper[, imodel] <- upper[match(BratioUpper[["Label"]], mcmclabs)]
}
### get MCMC for SPRratio
# try it to see what you get
tmp <- grep("SPRratio", names(mcmc[[imodel]]))
if (length(tmp) > 0) { # there are some mcmc values to use
# subset of columns from MCMC for this model
mcmc.tmp <- mcmc[[imodel]][, tmp]
mcmclabs <- names(mcmc.tmp)
lower <- apply(mcmc.tmp, 2, quantile, prob = lowerCI, na.rm = TRUE)
med <- apply(mcmc.tmp, 2, quantile, prob = 0.5, na.rm = TRUE)
upper <- apply(mcmc.tmp, 2, quantile, prob = upperCI, na.rm = TRUE)
SPRratio[, imodel] <- med[match(SPRratio[["Label"]], mcmclabs)]
SPRratioLower[, imodel] <- lower[match(SPRratioLower[["Label"]], mcmclabs)]
SPRratioUpper[, imodel] <- upper[match(SPRratioUpper[["Label"]], mcmclabs)]
}
### get MCMC for recruits
tmp <- grep("^Recr_", names(mcmc[[imodel]])) # try it to see what you get
    # exclude rows that aren't part of the timeseries
tmp2 <- grep("Recr_unfished", names(mcmc[[imodel]]), ignore.case = TRUE)
tmp <- setdiff(tmp, tmp2)
if (length(tmp) > 0) { # there are some mcmc values to use
# subset of columns from MCMC for this model
mcmc.tmp <- mcmc[[imodel]][, tmp]
mcmclabs <- names(mcmc.tmp)
lower <- apply(mcmc.tmp, 2, quantile, prob = lowerCI, na.rm = TRUE)
med <- apply(mcmc.tmp, 2, quantile, prob = 0.5, na.rm = TRUE)
# mean recruitment should be more comparable
mean <- apply(mcmc.tmp, 2, mean, na.rm = TRUE)
upper <- apply(mcmc.tmp, 2, quantile, prob = upperCI, na.rm = TRUE)
if (!meanRecWarning) {
message(
"note: using mean recruitment from MCMC instead of median,\n",
"because it is more comparable to MLE\n"
)
meanRecWarning <- TRUE
}
recruits[, imodel] <- mean[match(recruits[["Label"]], mcmclabs)]
recruitsLower[, imodel] <- lower[match(recruitsLower[["Label"]], mcmclabs)]
recruitsUpper[, imodel] <- upper[match(recruitsUpper[["Label"]], mcmclabs)]
}
### get MCMC for recdevs
# get values from mcmc to replace
tmp <- unique(c(
grep("_RecrDev_", names(mcmc[[imodel]])),
grep("_InitAge_", names(mcmc[[imodel]])),
grep("ForeRecr_", names(mcmc[[imodel]]))
))
if (length(tmp) > 0) { # there are some mcmc values to use
# subset of columns from MCMC for this model
mcmc.tmp <- mcmc[[imodel]][, tmp]
mcmclabs <- names(mcmc.tmp)
lower <- apply(mcmc.tmp, 2, quantile, prob = lowerCI, na.rm = TRUE)
med <- apply(mcmc.tmp, 2, quantile, prob = 0.5, na.rm = TRUE)
upper <- apply(mcmc.tmp, 2, quantile, prob = upperCI, na.rm = TRUE)
recdevs[, imodel] <- med[match(recdevs[["Label"]], mcmclabs)]
recdevsLower[, imodel] <- lower[match(recdevsLower[["Label"]], mcmclabs)]
recdevsUpper[, imodel] <- upper[match(recdevsUpper[["Label"]], mcmclabs)]
}
}
if (is.null(endyrvec)) {
endyrvec <- endyrs + 1
}
if (length(endyrvec) == 1) {
endyrvec <- rep(endyrvec, nlines)
}
# not sure why there should be NA values for Yr column in recdevs,
# but old code to eliminate the devs past endyr wasn't working as
# configured before
recdevs <- recdevs[!is.na(recdevs[["Yr"]]), ]
recdevsLower <- recdevsLower[!is.na(recdevsLower[["Yr"]]), ]
recdevsUpper <- recdevsUpper[!is.na(recdevsUpper[["Yr"]]), ]
# change to NA any values beyond endyr
if (!is.null(endyrvec)) {
for (iline in 1:nlines) {
endyr <- endyrvec[iline]
imodel <- models[iline]
SpawnBio[SpawnBio[["Yr"]] > endyr, imodel] <- NA
SpawnBioLower[SpawnBio[["Yr"]] > endyr, imodel] <- NA
SpawnBioUpper[SpawnBio[["Yr"]] > endyr, imodel] <- NA
Bratio[Bratio[["Yr"]] > endyr, imodel] <- NA
BratioLower[Bratio[["Yr"]] > endyr, imodel] <- NA
BratioUpper[Bratio[["Yr"]] > endyr, imodel] <- NA
#### note: add generalized startyrvec option in the future
## if(exists("startyrvec")){
## startyr <- startyrvec[iline]
## Bratio[Bratio[["Yr"]] < startyr, imodel] <- NA
## BratioLower[Bratio[["Yr"]] < startyr, imodel] <- NA
## BratioUpper[Bratio[["Yr"]] < startyr, imodel] <- NA
## }
SPRratio[SPRratio[["Yr"]] >= endyr, imodel] <- NA
SPRratioLower[SPRratio[["Yr"]] >= endyr, imodel] <- NA
SPRratioUpper[SPRratio[["Yr"]] >= endyr, imodel] <- NA
Fvalue[Fvalue[["Yr"]] >= endyr, imodel] <- NA
FvalueLower[Fvalue[["Yr"]] >= endyr, imodel] <- NA
FvalueUpper[Fvalue[["Yr"]] >= endyr, imodel] <- NA
recruits[recruits[["Yr"]] > endyr, imodel] <- NA
recruitsLower[recruits[["Yr"]] > endyr, imodel] <- NA
recruitsUpper[recruits[["Yr"]] > endyr, imodel] <- NA
if (!is.null(recdevs)) {
recdevs[recdevs[["Yr"]] > endyr, imodel] <- NA
recdevsLower[recdevs[["Yr"]] > endyr, imodel] <- NA
recdevsUpper[recdevs[["Yr"]] > endyr, imodel] <- NA
}
}
}
# function to add shaded uncertainty intervals behind line
# requires the existence of the TRUE/FALSE vector "uncertainty"
addpoly <- function(yrvec, lower, upper) {
lower[lower < 0] <- 0 # max of value or 0
for (iline in (1:nlines)[uncertainty]) {
imodel <- models[iline]
good <- !is.na(lower[, imodel]) & !is.na(upper[, imodel])
polygon(
x = c(yrvec[good], rev(yrvec[good])),
y = c(lower[good, imodel], rev(upper[good, imodel])),
border = NA, col = shadecol[iline]
)
# lines(yrvec[good],lower[good,imodel],lty=3,col=col[iline])
# lines(yrvec[good],upper[good,imodel],lty=3,col=col[iline])
}
}
## equ <- -(1:2) # IGT 2020/3/12: this variable seems to not be used
  # function to plot spawning biomass (or spawning output) time series for
  # all models, with optional uncertainty intervals and equilibrium points.
  #
  # Reads from the enclosing environment: SpawnBio / SpawnBioLower /
  # SpawnBioUpper (year x model tables; rows 1-2 are excluded from the time
  # series and row 1 is used below as the equilibrium value), models, nlines,
  # uncertainty, endyrvec, endyrs, and the shared plot settings
  # (col, pch, lty, lwd, type, xlim, ylimAdj, xaxs, yaxs, add, legend, ...).
  # Returns the upper y-axis limit so callers can reuse the scale.
  plotSpawnBio <- function(show_uncertainty = TRUE) {
    # only show uncertainty if values are present for at least one model
    if (!any(uncertainty)) {
      show_uncertainty <- FALSE
    }
    # get axis limits
    if (is.null(xlim)) {
      if (show_equilibrium) {
        xlim <- range(SpawnBio[["Yr"]])
      } else {
        # drop the first two rows (equilibrium/initial values) from the range
        xlim <- range(SpawnBio[["Yr"]][-c(1, 2)])
      }
      if (!is.null(endyrvec) & all(endyrvec < max(xlim))) {
        xlim[2] <- max(endyrvec)
      }
    }
    # y-limits span 0 through the largest value within the x-range,
    # inflated by the user-controlled ylimAdj factor
    ylim <- ylimAdj * range(0, SpawnBio[
      SpawnBio[["Yr"]] >= xlim[1] &
        SpawnBio[["Yr"]] <= xlim[2],
      models
    ], na.rm = TRUE)
    if (show_uncertainty) {
      # expand y-limits to cover the upper uncertainty bounds as well
      ylim <- range(ylim, ylimAdj * SpawnBioUpper[
        SpawnBio[["Yr"]] >= xlim[1] &
          SpawnBio[["Yr"]] <= xlim[2],
        models[uncertainty]
      ], na.rm = TRUE)
    }
    # set units on spawning biomass plot
    if (length(unique(SpawnOutputUnits)) != 1) {
      warning(
        "Some models may have different units",
        " for spawning output than others"
      )
    }
    if (any(SpawnOutputUnits == "numbers")) {
      ylab <- labels[12] # numbers
    } else {
      ylab <- labels[2] # biomass
    }
    # do some scaling of y-axis (divide by 1e3/1e6 and adjust the label text)
    yunits <- 1
    if (rescale & ylim[2] > 1e3 & ylim[2] < 1e6) {
      yunits <- 1e3
      ylab <- gsub("(t)", "(x1000 t)", ylab, fixed = TRUE)
      ylab <- gsub("eggs", "x1000 eggs", ylab, fixed = TRUE)
    }
    if (rescale & ylim[2] > 1e6) {
      yunits <- 1e6
      ylab <- gsub("(t)", "(million t)", ylab, fixed = TRUE)
      ylab <- gsub("eggs", "millions of eggs", ylab, fixed = TRUE)
    }
    if (rescale & ylim[2] > 1e9) {
      yunits <- 1e9
      ylab <- gsub("million", "billion", ylab, fixed = TRUE)
    }
    if (!add) {
      # empty plot region; axes are drawn manually below
      plot(0,
        type = "n", xlim = xlim, ylim = ylim, xlab = labels[1], ylab = ylab,
        xaxs = xaxs, yaxs = yaxs, axes = FALSE
      )
    }
    if (show_uncertainty) {
      # add shading for uncertainty
      addpoly(
        yrvec = SpawnBio[["Yr"]][-(1:2)], lower = SpawnBioLower[-(1:2), ],
        upper = SpawnBioUpper[-(1:2), ]
      )
      # equilibrium spawning biomass year by model
      # (staggered horizontally so intervals don't overlap)
      xEqu <- SpawnBio[["Yr"]][2] - (1:nlines) / nlines
    } else {
      # equilibrium spawning biomass year by model
      xEqu <- rep(SpawnBio[["Yr"]][2], nlines)
    }
    # draw points and lines
    if (spacepoints %in% c(0, 1, FALSE)) { # don't spread out points
      matplot(SpawnBio[["Yr"]][-(1:2)], SpawnBio[-(1:2), models],
        col = col, pch = pch, lty = lty, lwd = lwd, type = type,
        ylim = ylim, add = TRUE
      )
    } else {
      # spread out points with interval equal to spacepoints and
      # staggering equal to staggerpoints
      matplot(SpawnBio[["Yr"]][-(1:2)], SpawnBio[-(1:2), models],
        col = col, lty = lty, lwd = lwd, type = "l", ylim = ylim, add = TRUE
      )
      # blank out all but every spacepoints-th year (offset per line)
      # so each model's point symbols land on different years
      SpawnBio2 <- SpawnBio
      for (iline in 1:nlines) {
        imodel <- models[iline]
        SpawnBio2[
          (SpawnBio2[["Yr"]] - initpoint) %% spacepoints !=
            (staggerpoints * iline) %% spacepoints,
          imodel
        ] <- NA
      }
      matplot(SpawnBio2[["Yr"]][-(1:2)], SpawnBio2[-(1:2), models],
        col = col, pch = pch, lwd = lwd, type = "p", ylim = ylim, add = TRUE
      )
    }
    if (show_equilibrium) {
      ## add arrows for equilibrium values
      old_warn <- options()$warn # previous setting
      options(warn = -1) # turn off "zero-length arrow" warning
      if (show_uncertainty) {
        # NOTE(review): xEqu has one element per line (1:nlines) but is
        # indexed here by model number; these only coincide when
        # models == 1:nlines -- confirm intended
        arrows(
          x0 = xEqu[models[uncertainty]],
          y0 = as.numeric(SpawnBioLower[1, models[uncertainty]]),
          x1 = xEqu[models[uncertainty]],
          y1 = as.numeric(SpawnBioUpper[1, models[uncertainty]]),
          length = 0.01, angle = 90, code = 3, col = col[uncertainty],
          lwd = 2
        )
      }
      options(warn = old_warn) # returning to old value
      ## add points at equilibrium values (row 1 of the table)
      points(
        x = xEqu, SpawnBio[1, models], col = col, pch = pch,
        cex = 1.2, lwd = lwd
      )
    }
    # add axes
    if (!add) {
      abline(h = 0, col = "grey")
      if (tickEndYr) { # include ending year in axis labels
        # default tick positions if axis(1) were run
        ticks <- graphics::axTicks(1)
        # make axis (excluding anything after the max ending year)
        axis(1, at = c(ticks[ticks < max(endyrvec)], max(endyrvec)))
      } else {
        # nothing special (may include labels beyond the ending year)
        axis(1)
      }
      # add shaded area over forecast years if more than 1 forecast year shown
      if (!is.null(endyrvec) &
        max(endyrvec) > 1 + max(endyrs) &
        shadeForecast) {
        rect(
          xleft = max(endyrs) + 1, ybottom = par()$usr[3],
          xright = par()$usr[2], ytop = par()$usr[4],
          col = gray(0, alpha = 0.1), border = NA
        )
      }
      yticks <- pretty(ylim)
      # labels divided by yunits to match the rescaled axis label text
      axis(2, at = yticks, labels = format(yticks / yunits), las = 1)
      box()
    }
    if (legend) {
      # add legend if requested
      add_legend(legendlabels,
        legendloc = legendloc,
        legendorder = legendorder,
        legendncol = legendncol,
        col = col,
        pch = pch,
        lwd = lwd,
        lty = lty
      )
    }
    # return upper y-limit
    return(ylim[2])
  }
  # function to plot the biomass ratio (relative spawning biomass) time
  # series for all models, with optional uncertainty intervals and
  # horizontal reference lines at the target (btarg) and minimum threshold
  # (minbthresh) when those are positive.
  #
  # Reads from the enclosing environment: Bratio / BratioLower / BratioUpper
  # (year x model tables), models, nlines, uncertainty, endyrvec, endyrs,
  # and the shared plot settings. Returns the upper y-axis limit.
  plotBratio <- function(show_uncertainty = TRUE) {
    # only show uncertainty if values are present for at least one model
    if (!any(uncertainty)) {
      show_uncertainty <- FALSE
    }
    # get axis limits
    if (is.null(xlim)) {
      xlim <- range(Bratio[["Yr"]])
      if (!is.null(endyrvec) & all(endyrvec < max(xlim))) {
        xlim[2] <- max(endyrvec)
      }
    }
    # y-limits span 0 through the largest value inside the x-range
    ylim <- ylimAdj * range(0, Bratio[
      Bratio[["Yr"]] >= xlim[1] &
        Bratio[["Yr"]] <= xlim[2],
      models
    ], na.rm = TRUE)
    if (show_uncertainty) {
      # expand y-limits to cover the upper uncertainty bounds
      # (ylim / ylimAdj undoes the inflation before re-applying it)
      ylim <- ylimAdj * range(ylim / ylimAdj, BratioUpper[
        Bratio[["Yr"]] >= xlim[1] &
          Bratio[["Yr"]] <= xlim[2],
        models[uncertainty]
      ], na.rm = TRUE)
    }
    # make plot
    if (!add) {
      plot(0,
        type = "n", xlim = xlim, ylim = ylim,
        xlab = labels[1], ylab = labels[3],
        xaxs = xaxs, yaxs = yaxs, axes = FALSE
      )
    }
    if (show_uncertainty) {
      addpoly(Bratio[["Yr"]], lower = BratioLower, upper = BratioUpper)
    }
    if (spacepoints %in% c(0, 1, FALSE)) { # don't spread out points
      matplot(Bratio[["Yr"]], Bratio[, models],
        col = col, pch = pch,
        lty = lty, lwd = lwd, type = type, ylim = ylim, add = TRUE
      )
    } else {
      # spread out points with interval equal to spacepoints and
      # staggering equal to staggerpoints
      matplot(Bratio[["Yr"]], Bratio[, models],
        col = col, pch = pch,
        lty = lty, lwd = lwd, type = "l", ylim = ylim, add = TRUE
      )
      if (type != "l") {
        # blank out all but every spacepoints-th year per model so the
        # point symbols from different models don't overlap
        Bratio2 <- Bratio
        for (iline in 1:nlines) {
          imodel <- models[iline]
          Bratio2[(Bratio2[["Yr"]] - initpoint) %% spacepoints !=
            (staggerpoints * iline) %% spacepoints, imodel] <- NA
        }
        matplot(Bratio2[["Yr"]], Bratio2[, models],
          col = col, pch = pch,
          lty = lty, lwd = lwd, type = "p", ylim = ylim, add = TRUE
        )
      }
    }
    # start from default tick positions, then splice in reference values
    yticks <- pretty(par()$yaxp[1:2])
    if (btarg > 0) {
      # horizontal line and label at the biomass target
      abline(h = btarg, col = "red", lty = 2)
      text(min(Bratio[["Yr"]]) + 4, btarg + 0.03, labels[10], adj = 0)
      yticks <- sort(c(btarg, yticks))
    }
    if (minbthresh > 0) {
      # horizontal line and label at the minimum biomass threshold
      abline(h = minbthresh, col = "red", lty = 2)
      text(min(Bratio[["Yr"]]) + 4, minbthresh + 0.03, labels[11], adj = 0)
      yticks <- sort(c(minbthresh, yticks))
    }
    if (!add) {
      abline(h = 0, col = "grey")
      abline(h = 1, col = "grey", lty = 2)
      if (tickEndYr) { # include ending year in axis labels
        # default tick positions if axis(1) were run
        ticks <- graphics::axTicks(1)
        # make axis (excluding anything after the max ending year)
        axis(1, at = c(ticks[ticks < max(endyrvec)], max(endyrvec)))
      } else {
        # nothing special (may include labels beyond the ending year)
        axis(1)
      }
      # add shaded area over forecast years if more than 1 forecast year shown
      if (!is.null(endyrvec) &
        max(endyrvec) > 1 + max(endyrs) &
        shadeForecast) {
        rect(
          xleft = max(endyrs) + 1, ybottom = par()$usr[3],
          xright = par()$usr[2], ytop = par()$usr[4],
          col = gray(0, alpha = 0.1), border = NA
        )
      }
      axis(2, at = yticks, las = 1)
      box()
    }
    if (legend) {
      # add legend if requested
      add_legend(legendlabels,
        legendloc = legendloc,
        legendorder = legendorder,
        legendncol = legendncol,
        col = col,
        pch = pch,
        lwd = lwd,
        lty = lty
      )
    }
    # return upper y-limit
    return(ylim[2])
  }
  # function to plot the SPR quantity time series for all models. Depending
  # on the reporting option chosen in the model (SPRratioLabel), this may be
  # a ratio like (1-SPR)/(1-SPR_target) or a raw value like "1-SPR"; the
  # ratio case gets a second right-hand axis showing 1-SPR.
  #
  # Reads from the enclosing environment: SPRratio / SPRratioLower /
  # SPRratioUpper, SPRratioLabel, sprtarg, models, nlines, uncertainty,
  # endyrvec, endyrs, and the shared plot settings.
  # Returns the upper y-axis limit.
  plotSPRratio <- function(show_uncertainty = TRUE) {
    # plot SPR quantity (may be ratio or raw value)
    # only show uncertainty if values are present for at least one model
    if (!any(uncertainty)) {
      show_uncertainty <- FALSE
    }
    # get axis limits
    if (is.null(xlim)) {
      xlim <- range(SPRratio[["Yr"]])
      if (!is.null(endyrvec) & all(endyrvec < max(xlim))) {
        xlim[2] <- max(endyrvec)
      }
    }
    ylim <- ylimAdj * range(0, SPRratio[
      SPRratio[["Yr"]] >= xlim[1] &
        SPRratio[["Yr"]] <= xlim[2],
      models
    ], na.rm = TRUE)
    if (show_uncertainty) {
      # expand y-limits to cover the upper uncertainty bounds
      ylim <- ylimAdj * range(ylim / ylimAdj,
        SPRratioUpper[
          SPRratio[["Yr"]] >= xlim[1] &
            SPRratio[["Yr"]] <= xlim[2],
          models[uncertainty]
        ],
        na.rm = TRUE
      )
    }
    par(par)
    # make plot
    if (!add) {
      if (isTRUE(!is.na(SPRratioLabel) &&
        SPRratioLabel ==
          paste0("(1-SPR)/(1-SPR_", floor(100 * sprtarg), "%)"))) {
        # add to right-hand outer margin to make space
        # for second vertical axis
        # store current margin parameters
        # save old margins
        newmar <- oldmar <- par()$mar
        # match right-hand margin value to left-hand value
        newmar[4] <- newmar[2]
        # update graphics parameters
        par(mar = newmar)
      }
      plot(0,
        type = "n", xlim = xlim, ylim = ylim, xlab = labels[1],
        ylab = "", xaxs = xaxs, yaxs = yaxs, las = 1, axes = FALSE
      )
      axis(2)
    }
    if (show_uncertainty) {
      addpoly(SPRratio[["Yr"]], lower = SPRratioLower, upper = SPRratioUpper)
    }
    if (spacepoints %in% c(0, 1, FALSE)) { # don't spread out points
      matplot(SPRratio[["Yr"]], SPRratio[, models],
        col = col, pch = pch,
        lty = lty, lwd = lwd, type = type, ylim = ylim, add = TRUE
      )
    } else {
      # spread out points with interval equal to spacepoints and
      # staggering equal to staggerpoints
      matplot(SPRratio[["Yr"]], SPRratio[, models],
        col = col, pch = pch,
        lty = lty, lwd = lwd, type = "l", ylim = ylim, add = TRUE
      )
      if (type != "l") {
        # blank out all but every spacepoints-th year per model
        SPRratio2 <- SPRratio
        for (iline in 1:nlines) {
          imodel <- models[iline]
          SPRratio2[(SPRratio2[["Yr"]] - initpoint) %% spacepoints !=
            (staggerpoints * iline) %% spacepoints, imodel] <- NA
        }
        matplot(SPRratio2[["Yr"]], SPRratio2[, models],
          col = col, pch = pch,
          lty = lty, lwd = lwd, type = "p", ylim = ylim, add = TRUE
        )
      }
    }
    abline(h = 0, col = "grey")
    if (sprtarg > 0) {
      if (isTRUE(SPRratioLabel == "1-SPR")) {
        # if starter file chooses raw SPR as the option for reporting,
        # don't show ratio
        abline(h = sprtarg, col = "red", lty = 2)
        text(SPRratio[["Yr"]][1] + 4, (sprtarg + 0.03), labels[10],
          adj = 0
        )
        mtext(
          side = 2, text = SPRratioLabel,
          line = par()$mgp[1], col = par()$col.lab, cex = par()$cex.lab
        )
      } else {
        # draw line at sprtarg
        yticks <- pretty(ylim)
        if (isTRUE(!is.na(SPRratioLabel) &&
          SPRratioLabel == paste0(
            "(1-SPR)/(1-SPR_",
            floor(100 * sprtarg), "%)"
          ))) {
          # add right-hand vertical axis showing 1-SPR
          # (ratio of 1 corresponds to being exactly at the target)
          abline(h = 1, col = "red", lty = 2)
          text(SPRratio[["Yr"]][1] + 4, 1 + 0.03, labels[10], adj = 0)
          axis(4, at = yticks, labels = yticks * (1 - sprtarg), las = 1)
          mtext(
            side = 4, text = "1 - SPR",
            line = par()$mgp[1], col = par()$col.lab, cex = par()$cex.lab
          )
          # line below has round to be more accurate
          # than the floor which is used
          # in the test above and in SS
          mtext(
            side = 2, text = paste("(1-SPR)/(1-SPR_", 100 * sprtarg, "%)", sep = ""),
            line = par()$mgp[1], col = par()$col.lab, cex = par()$cex.lab
          )
        } else {
          # unrecognized SPR reporting option: just label the axis
          message(
            "No line added to SPR ratio plot, ",
            "as the settings used in this model ",
            "have not yet been configured in SSplotComparisons."
          )
          mtext(
            side = 2, text = SPRratioLabel,
            line = par()$mgp[1], col = par()$col.lab, cex = par()$cex.lab
          )
        }
      }
    } else {
      # no target defined: just label the axis with whatever the model used
      mtext(
        side = 2, text = SPRratioLabel,
        line = par()$mgp[1], col = par()$col.lab, cex = par()$cex.lab
      )
    }
    if (!add) {
      if (tickEndYr) { # include ending year in axis labels
        # default tick positions if axis(1) were run
        ticks <- graphics::axTicks(1)
        # make axis (excluding anything after the max ending year)
        axis(1, at = c(ticks[ticks < max(endyrvec)], max(endyrvec)))
      } else {
        # nothing special (may include labels beyond the ending year)
        axis(1)
      }
      # add shaded area over forecast years if more than 1 forecast year shown
      if (!is.null(endyrvec) &
        max(endyrvec) > 1 + max(endyrs) &
        shadeForecast) {
        rect(
          xleft = max(endyrs) + 1, ybottom = par()$usr[3],
          xright = par()$usr[2], ytop = par()$usr[4],
          col = gray(0, alpha = 0.1), border = NA
        )
      }
    }
    if (legend) {
      # add legend if requested
      add_legend(legendlabels,
        legendloc = legendloc,
        legendorder = legendorder,
        legendncol = legendncol,
        col = col,
        pch = pch,
        lwd = lwd,
        lty = lty
      )
    }
    box()
    # NOTE(review): exists() also searches enclosing environments, so an
    # "oldmar" defined outside this function would trigger the restore even
    # when the margins were not changed here -- confirm intended
    if (exists("oldmar")) {
      # restore old margin parameters
      par(mar = oldmar)
    }
    # return upper y-limit
    return(ylim[2])
  }
  #### fishing mortality (however it is specified in the models)
  # function to plot the fishing mortality time series for all models,
  # with optional uncertainty intervals. The y-axis label comes from
  # FvalueLabel since the F reporting option can differ between models.
  #
  # Reads from the enclosing environment: Fvalue / FvalueLower /
  # FvalueUpper, FvalueLabel, models, nlines, uncertainty, endyrvec, and
  # the shared plot settings. Returns the upper y-axis limit.
  plotF <- function(show_uncertainty = TRUE) {
    # plot fishing mortality time series
    # only show uncertainty if values are present for at least one model
    if (!any(uncertainty)) {
      show_uncertainty <- FALSE
    }
    # get axis limits
    if (is.null(xlim)) {
      xlim <- range(Fvalue[["Yr"]])
      if (!is.null(endyrvec) & all(endyrvec < max(xlim))) {
        xlim[2] <- max(endyrvec)
      }
    }
    ylim <- ylimAdj * range(0, Fvalue[
      Fvalue[["Yr"]] >= xlim[1] &
        Fvalue[["Yr"]] <= xlim[2],
      models
    ], na.rm = TRUE)
    if (show_uncertainty) {
      # expand y-limits to cover the upper uncertainty bounds
      ylim <- ylimAdj * range(ylim / ylimAdj,
        FvalueUpper[
          Fvalue[["Yr"]] >= xlim[1] &
            Fvalue[["Yr"]] <= xlim[2],
          models[uncertainty]
        ],
        na.rm = TRUE
      )
    }
    par(par)
    # make plot
    if (!add) {
      plot(0,
        type = "n", xlim = xlim, ylim = ylim, xlab = labels[1],
        ylab = "", xaxs = xaxs, yaxs = yaxs, las = 1, axes = FALSE
      )
      if (tickEndYr) { # include ending year in axis labels
        # default tick positions if axis(1) were run
        ticks <- graphics::axTicks(1)
        # make axis (excluding anything after the max ending year)
        axis(1, at = c(ticks[ticks < max(endyrvec)], max(endyrvec)))
      } else {
        # nothing special (may include labels beyond the ending year)
        axis(1)
      }
      axis(2)
    }
    if (show_uncertainty) {
      addpoly(Fvalue[["Yr"]], lower = FvalueLower, upper = FvalueUpper)
    }
    if (spacepoints %in% c(0, 1, FALSE)) { # don't spread out points
      matplot(Fvalue[["Yr"]], Fvalue[, models],
        col = col, pch = pch,
        lty = lty, lwd = lwd, type = type, ylim = ylim, add = TRUE
      )
    } else {
      # spread out points with interval equal to spacepoints and
      # staggering equal to staggerpoints
      matplot(Fvalue[["Yr"]], Fvalue[, models],
        col = col, pch = pch,
        lty = lty, lwd = lwd, type = "l", ylim = ylim, add = TRUE
      )
      if (type != "l") {
        Fvalue2 <- Fvalue
        for (iline in 1:nlines) {
          imodel <- models[iline]
          # NOTE(review): unlike the SpawnBio/Bratio/SPRratio plots, this
          # modulo test does not subtract initpoint -- confirm whether the
          # offset was intentionally omitted here
          Fvalue2[Fvalue2[["Yr"]] %% spacepoints !=
            (staggerpoints * iline) %% spacepoints, imodel] <- NA
        }
        matplot(Fvalue2[["Yr"]], Fvalue2[, models],
          col = col, pch = pch,
          lty = lty, lwd = lwd, type = "p", ylim = ylim, add = TRUE
        )
      }
    }
    abline(h = 0, col = "grey")
    # y-axis label reflects the F reporting option used by the models
    mtext(
      side = 2, text = FvalueLabel,
      line = par()$mgp[1], col = par()$col.lab, cex = par()$cex.lab
    )
    box()
    if (legend) {
      # add legend if requested
      add_legend(legendlabels,
        legendloc = legendloc,
        legendorder = legendorder,
        legendncol = legendncol,
        col = col,
        pch = pch,
        lwd = lwd,
        lty = lty
      )
    }
    # return upper y-limit
    return(ylim[2])
  }
  # function to plot the recruitment time series for all models, with
  # optional uncertainty arrows and equilibrium points (row 1 of the
  # recruits table is used as the equilibrium value).
  #
  # Reads from the enclosing environment: recruits / recruitsLower /
  # recruitsUpper (year x model tables), models, nlines, uncertainty,
  # endyrvec, endyrs, and the shared plot settings.
  # Returns the upper y-axis limit.
  #
  # @param show_uncertainty whether to add interval arrows (ignored if no
  #   model has uncertainty estimates)
  # @param recruit_lines currently unused within this function body
  plotRecruits <- function(show_uncertainty = TRUE, recruit_lines = TRUE) {
    # plot recruitment
    # only show uncertainty if values are present for at least one model
    if (!any(uncertainty)) {
      show_uncertainty <- FALSE
    }
    # determine x-limits
    if (is.null(xlim)) {
      if (show_equilibrium) {
        xlim <- range(recruits[["Yr"]])
      } else {
        # drop the first two rows (equilibrium/initial values) from the range
        xlim <- range(recruits[["Yr"]][-c(1, 2)])
      }
      if (!is.null(endyrvec) & all(endyrvec < max(xlim))) {
        xlim[2] <- max(endyrvec)
      }
    }
    # determine y-limits
    ylim <- ylimAdj * range(0, recruits[
      recruits[["Yr"]] >= xlim[1] &
        recruits[["Yr"]] <= xlim[2],
      models
    ], na.rm = TRUE)
    if (show_uncertainty) {
      # expand y-limits to cover the upper uncertainty bounds
      ylim <- ylimAdj * range(ylim / ylimAdj,
        recruitsUpper[
          recruits[["Yr"]] >= xlim[1] &
            recruits[["Yr"]] <= xlim[2],
          models[uncertainty]
        ],
        na.rm = TRUE
      )
    }
    # do some automatic scaling of the units
    # (recruits are in 1,000s, so a value of 1e3 is a million fish)
    ylab <- labels[4]
    yunits <- 1
    if (ylim[2] > 1e3 & ylim[2] < 1e6) {
      # if max recruits is between a million and a billion fish
      yunits <- 1e3
      ylab <- gsub("1,000s", "millions", ylab)
    }
    if (ylim[2] > 1e6) {
      # if max is greater than a billion fish (e.g. pacific hake)
      yunits <- 1e6
      ylab <- gsub("1,000s", "billions", ylab)
    }
    # plot lines showing recruitment
    if (spacepoints %in% c(0, 1, FALSE)) { # don't spread out points
      matplot(recruits[["Yr"]][-(1:2)], recruits[-(1:2), models],
        col = col, pch = pch, lty = lty, lwd = lwd, type = type,
        xlim = xlim, ylim = ylim,
        xlab = labels[1], ylab = ylab, xaxs = xaxs, yaxs = yaxs,
        axes = FALSE, add = add
      )
    } else {
      # spread out points with interval equal to spacepoints and
      # staggering equal to staggerpoints
      matplot(recruits[["Yr"]][-(1:2)], recruits[-(1:2), models],
        col = col, pch = pch, lty = lty, lwd = lwd, type = "l",
        xlim = xlim, ylim = ylim,
        xlab = labels[1], ylab = ylab, xaxs = xaxs, yaxs = yaxs,
        axes = FALSE, add = add
      )
      if (type != "l") {
        recruits2 <- recruits
        for (iline in 1:nlines) {
          imodel <- models[iline]
          # NOTE(review): here the modulo is applied before subtracting
          # initpoint, whereas the sibling plots use
          # (Yr - initpoint) %% spacepoints; these only agree when
          # initpoint == 0 -- confirm which form is intended
          recruits2[(recruits2[["Yr"]] %% spacepoints - initpoint) !=
            (staggerpoints * iline) %% spacepoints, imodel] <- NA
        }
        matplot(recruits2[["Yr"]][-(1:2)], recruits2[-(1:2), models],
          col = col, pch = pch, lty = lty, lwd = lwd, type = "p",
          xlab = labels[1], ylab = ylab, xaxs = xaxs, yaxs = yaxs,
          axes = FALSE, ylim = ylim, add = TRUE
        )
      }
    }
    ## Add points at equilibrium values. Note: I adapted this logic from the
    ## SSB plot above.
    if (show_uncertainty) {
      # staggered horizontally per line so intervals don't overlap
      xEqu <- recruits[["Yr"]][2] - (1:nlines) / nlines
    } else {
      # NOTE(review): uses Yr[1] here while plotSpawnBio uses Yr[2] in the
      # corresponding branch -- confirm the inconsistency is intentional
      xEqu <- rep(recruits[["Yr"]][1], nlines)
    }
    if (show_equilibrium) {
      points(
        x = xEqu, y = recruits[1, models], col = col, pch = pch,
        cex = 1.2, lwd = lwd
      )
    }
    # add uncertainty intervals when requested
    if (show_uncertainty) {
      for (iline in 1:nlines) {
        imodel <- models[iline]
        if (uncertainty[imodel]) {
          ## plot all but equilibrium values
          xvec <- recruits[["Yr"]]
          # stagger arrows horizontally when comparing multiple lines
          if (nlines > 1) xvec <- xvec + 0.4 * iline / nlines - 0.2
          old_warn <- options()$warn # previous setting
          options(warn = -1) # turn off "zero-length arrow" warning
          # arrows (-2 in vectors below is to remove initial year recruitment)
          # lower bounds clipped at 0 since negative recruitment is not shown
          arrows(
            x0 = xvec[-c(1, 2)],
            y0 = pmax(as.numeric(recruitsLower[-c(1, 2), imodel]), 0),
            x1 = xvec[-c(1, 2)],
            y1 = as.numeric(recruitsUpper[-c(1, 2), imodel]),
            length = 0.01, angle = 90, code = 3, col = col[imodel]
          )
          options(warn = old_warn) # returning to old value
          if (show_equilibrium) {
            # NOTE(review): xEqu and col are built per line (1:nlines) but
            # indexed here by model number; these only coincide when
            # models == 1:nlines -- confirm intended
            arrows(
              x0 = xEqu[imodel],
              y0 = pmax(as.numeric(recruitsLower[1, imodel]), 0),
              x1 = xEqu[imodel],
              y1 = as.numeric(recruitsUpper[1, imodel]),
              length = 0.01, angle = 90, code = 3, col = col[imodel]
            )
          }
        }
      }
    }
    abline(h = 0, col = "grey")
    if (legend) {
      # add legend if requested
      add_legend(legendlabels,
        legendloc = legendloc,
        legendorder = legendorder,
        legendncol = legendncol,
        col = col,
        pch = pch,
        lwd = lwd,
        lty = lty
      )
    }
    if (!add) {
      if (tickEndYr) { # include ending year in axis labels
        # default tick positions if axis(1) were run
        ticks <- graphics::axTicks(1)
        # make axis (excluding anything after the max ending year)
        axis(1, at = c(ticks[ticks < max(endyrvec)], max(endyrvec)))
      } else {
        # nothing special (may include labels beyond the ending year)
        axis(1)
      }
      # add shaded area over forecast years if more than 1 forecast year shown
      if (!is.null(endyrvec) &
        max(endyrvec) > 1 + max(endyrs) &
        shadeForecast) {
        rect(
          xleft = max(endyrs) + 1, ybottom = par()$usr[3],
          xright = par()$usr[2], ytop = par()$usr[4],
          col = gray(0, alpha = 0.1), border = NA
        )
      }
      yticks <- pretty(ylim)
      # labels divided by yunits to match the rescaled axis label text
      axis(2, at = yticks, labels = format(yticks / yunits), las = 1)
      box()
    }
    # return upper y-limit
    return(ylim[2])
  }
  # function to plot recruitment deviations for all models as points with
  # optional uncertainty arrows, on a y-axis forced symmetric around zero.
  #
  # Reads from the enclosing environment: recdevs / recdevsLower /
  # recdevsUpper (year x model tables), models, nlines, uncertainty,
  # endyrvec, endyrs, and the shared plot settings.
  # Returns the upper y-axis limit (or NA invisibly when uncertainty was
  # requested but no interval values exist).
  plotRecDevs <- function(show_uncertainty = TRUE) { # plot recruit deviations
    # test for bad values
    if (any(is.na(recdevs[["Yr"]]))) {
      warning("Recdevs associated with initial age structure may not be shown")
    }
    # only show uncertainty if values are present for at least one model
    if (!any(uncertainty)) {
      show_uncertainty <- FALSE
    }
    # empty plot
    if (is.null(xlim)) {
      xlim <- range(recdevs[["Yr"]], na.rm = TRUE)
      if (!is.null(endyrvec) & all(endyrvec < max(xlim))) {
        xlim[2] <- max(endyrvec)
      }
    }
    ylim <- ylimAdj * range(recdevs[
      recdevs[["Yr"]] >= xlim[1] &
        recdevs[["Yr"]] <= xlim[2],
      models
    ], na.rm = TRUE)
    if (any(is.infinite(ylim))) {
      # all-NA input makes range() return -Inf/Inf; bail out early
      warning(
        "Skipping recdev plots. Infinite ylim may indicate ",
        'all values are NA in summaryoutput[["recdevs"]]'
      )
      return(ylim[2])
    }
    if (show_uncertainty) {
      if (all(is.na(recdevsLower[, models]))) {
        # can't do uncertainty if no range present
        return(invisible(NA))
      }
      # expand y-limits to cover both lower and upper bounds
      ylim <- ylimAdj * range(recdevsLower[
        recdevs[["Yr"]] >= xlim[1] &
          recdevs[["Yr"]] <= xlim[2],
        models
      ],
      recdevsUpper[
        recdevs[["Yr"]] >= xlim[1] &
          recdevs[["Yr"]] <= xlim[2],
        models
      ],
      na.rm = TRUE
      )
    }
    ylim <- range(-ylim, ylim) # make symmetric
    if (!add) {
      plot(0,
        xlim = xlim, ylim = ylim, axes = FALSE,
        type = "n", xlab = labels[1], ylab = labels[5], xaxs = xaxs,
        yaxs = yaxs, las = 1
      )
      axis(2, las = 1)
      abline(h = 0, col = "grey")
    }
    if (show_uncertainty) {
      for (iline in 1:nlines) {
        imodel <- models[iline]
        if (uncertainty[imodel]) {
          xvec <- recdevs[["Yr"]]
          # stagger arrows horizontally when comparing multiple lines
          if (nlines > 1) xvec <- xvec + 0.4 * iline / nlines - 0.2
          # NOTE(review): arrows here are colored by col[iline], while the
          # corresponding arrows in plotRecruits use col[imodel] -- confirm
          # which indexing is intended
          arrows(
            x0 = xvec, y0 = as.numeric(recdevsLower[, imodel]),
            x1 = xvec, y1 = as.numeric(recdevsUpper[, imodel]),
            length = 0.01, angle = 90, code = 3, col = col[iline]
          )
        }
      }
    }
    # loop over vector of models to add points (no connecting lines)
    for (iline in 1:nlines) {
      imodel <- models[iline]
      yvec <- recdevs[, imodel]
      xvec <- recdevs[["Yr"]]
      points(xvec, yvec, pch = pch[iline], lwd = lwd[iline], col = col[iline])
    }
    if (!add) {
      if (tickEndYr) { # include ending year in axis labels
        # default tick positions if axis(1) were run
        ticks <- graphics::axTicks(1)
        # make axis (excluding anything after the max ending year)
        axis(1, at = c(ticks[ticks < max(endyrvec)], max(endyrvec)))
      } else {
        # nothing special (may include labels beyond the ending year)
        axis(1)
      }
      # add shaded area over forecast years if more than 1 forecast year shown
      if (!is.null(endyrvec) &
        max(endyrvec) > 1 + max(endyrs) &
        shadeForecast) {
        rect(
          xleft = max(endyrs) + 1, ybottom = par()$usr[3],
          xright = par()$usr[2], ytop = par()$usr[4],
          col = gray(0, alpha = 0.1), border = NA
        )
      }
      box()
    }
    if (legend) {
      # add legend if requested
      add_legend(legendlabels,
        legendloc = legendloc,
        legendorder = legendorder,
        legendncol = legendncol,
        col = col,
        pch = pch,
        lwd = lwd,
        lty = lty
      )
    }
    # return upper y-limit
    return(ylim[2])
  }
## xmax <- 1.1*max(reldep)
## ymax <- 1.1*max(1,relspr[!is.na(relspr)])
## ylab <- managementratiolabels[1,2]
## phasefunc <- function(){
## if(!add) plot(reldep,relspr,xlab="B/Btarget",
## xlim=c(0,xmax),ylim=c(0,ymax),ylab=ylab,type="n")
## lines(reldep,relspr,type="o",col=col2)
## abline(h=0,col="grey")
## abline(v=0,col="grey")
## lines(reldep,relspr,type="o",col=col2)
## points(reldep[length(reldep)],relspr[length(relspr)],col=col4,pch=19)
## abline(h=1,col=col4,lty=2)
## abline(v=1,col=col4,lty=2)}
plotPhase <- function(show_uncertainty = TRUE) {
# plot biomass ratio vs. SPRratio
# only show uncertainty if values are present for at least one model
if (!any(uncertainty)) {
show_uncertainty <- FALSE
}
# get axis limits
xlim <- range(0, ylimAdj * Bratio[, models], na.rm = TRUE)
ylim <- range(0, ylimAdj * SPRratio[, models], na.rm = TRUE)
# make plot
if (!add) {
plot(0,
type = "n", xlim = xlim, ylim = ylim, xlab = labels[3],
ylab = SPRratioLabel, xaxs = xaxs, yaxs = yaxs, las = 1
)
}
goodyrs <- intersect(Bratio[["Yr"]], SPRratio[["Yr"]])
lastyr <- max(goodyrs)
for (iline in 1:nlines) {
imodel <- models[iline]
# no option get to stagger points in phase plots,
# only the last point is marked
xvals <- Bratio[Bratio[["Yr"]] %in% goodyrs, imodel]
yvals <- SPRratio[SPRratio[["Yr"]] %in% goodyrs, imodel]
lines(xvals,
yvals,
col = col[iline],
lty = lty[iline], lwd = lwd[iline],
type = "l"
) # no user control of type to add points
# NA values and missing points will occur if final year is different
points(tail(xvals, 1),
tail(yvals, 1),
col = col[iline],
pch = pch[iline], lwd = lwd[iline]
)
}
abline(h = 1, v = 1, col = "grey", lty = 2)
if (btarg > 0) abline(v = btarg, col = "red", lty = 2)
if (sprtarg > 0) abline(h = sprtarg, col = "red", lty = 2)
if (legend) {
# add legend if requested
add_legend(legendlabels,
legendloc = legendloc,
legendorder = legendorder,
legendncol = legendncol,
col = col,
pch = pch,
lwd = lwd,
lty = lty
)
}
# return upper y-limit
return(ylim[2])
}
plotIndices <- function(log = FALSE, iindex) {
# function to plot different fits to a single index of abundance
# get a subset of index table including only 1 index per model
# (hopefully matching each other)
indices2 <- NULL
for (iline in 1:nlines) {
imodel <- models[iline]
subset2 <- indices[["imodel"]] == imodel &
indices[["Yr"]] <= endyrvec[iline] &
indices[["Fleet"]] == indexfleets[[imodel]][iindex]
indices2 <- rbind(indices2, indices[subset2, ])
}
# get quantities for plot
yr <- indices2[["Yr"]]
obs <- indices2[["Obs"]]
exp <- indices2[["Exp"]]
imodel <- indices2[["imodel"]]
Q <- indices2[["Calc_Q"]]
if (log) {
obs <- log(obs)
exp <- log(exp)
ylab <- labels[7]
} else {
ylab <- labels[6]
}
# get uncertainty intervals if requested
if (indexUncertainty) {
if (indexPlotEach) {
if (is.null(indexSEvec)) {
indexSEvec <- indices2[["SE"]]
}
y <- obs
if (log) {
upper <- qnorm(.975, mean = y, sd = indexSEvec)
lower <- qnorm(.025, mean = y, sd = indexSEvec)
} else {
upper <- qlnorm(.975, meanlog = log(y), sdlog = indexSEvec)
lower <- qlnorm(.025, meanlog = log(y), sdlog = indexSEvec)
}
} else {
subset <- indices2[["imodel"]] == models[1]
if (is.null(indexSEvec)) {
indexSEvec <- indices2[["SE"]][subset]
}
y <- obs
if (log) {
upper <- qnorm(.975, mean = y, sd = indexSEvec)
lower <- qnorm(.025, mean = y, sd = indexSEvec)
} else {
upper <- qlnorm(.975, meanlog = log(y), sdlog = indexSEvec)
lower <- qlnorm(.025, meanlog = log(y), sdlog = indexSEvec)
}
}
} else {
upper <- NULL
lower <- NULL
}
### make plot of index fits
# calculate ylim (excluding dummy observations from observed but not expected)
sub <- !is.na(indices2[["Like"]])
ylim <- range(exp, obs[sub], lower[sub], upper[sub], na.rm = TRUE)
# if no values included in subset, then set ylim based on all values
if (!any(sub)) {
ylim <- range(exp, obs, lower, upper, na.rm = TRUE)
}
if (!log) {
# 0 included if not in log space
ylim <- c(0, ylimAdj * ylim[2])
} else {
# add padding on top and bottom
ylim <- ylim + c(-1, 1) * (ylimAdj - 1) * diff(ylim)
}
meanQ <- rep(NA, nlines)
if (!add) {
if (!is.null(endyrvec)) {
xlim <- c(min(yr), max(endyrvec))
} else {
xlim <- range(yr)
}
plot(0,
type = "n", xlim = xlim, yaxs = yaxs,
ylim = ylim, xlab = "Year", ylab = ylab, axes = FALSE
)
}
if (!log & yaxs != "i") {
abline(h = 0, col = "grey")
}
Qtext <- rep("(Q =", nlines)
for (iline in (1:nlines)[!mcmcVec]) {
imodel <- models[iline]
subset <- indices2[["imodel"]] == imodel
meanQ[iline] <- mean(Q[subset])
if (indexQlabel && any(Q[subset] != mean(Q[subset]))) {
Qtext[iline] <- "(mean Q ="
}
x <- yr[subset]
y <- exp[subset]
lines(x, y,
pch = pch[iline], lwd = lwd[iline],
lty = lty[iline], col = col[iline], type = type
)
}
legendlabels2 <- legendlabels
if (indexQlabel) {
legendlabels2 <- paste(
legendlabels, Qtext,
format(meanQ, digits = indexQdigits), ")"
)
}
if (legend) {
# add legend if requested
add_legend(legendlabels,
legendloc = legendloc,
legendorder = legendorder,
legendncol = legendncol,
col = col,
pch = pch,
lwd = lwd,
lty = lty
)
}
if (indexPlotEach) {
# plot observed values for each model (staggered slightly)
for (iline in (1:nlines)[!mcmcVec]) {
adj <- 0.2 * iline / nlines - 0.1
imodel <- models[iline]
if (any(is.na(indices2[["like"]]))) {
warning("NA's found in likelihood, may cause issues with index plots")
}
subset <- indices2[["imodel"]] == imodel & !is.na(indices2[["Like"]])
# add uncertainty intervals if requested
if (indexUncertainty) {
arrows(
x0 = yr[subset] + adj, y0 = lower[subset],
x1 = yr[subset] + adj, y1 = upper[subset],
length = 0.01, angle = 90, code = 3,
# colors have hard-wired alpha value of 0.7
col = adjustcolor(col, alpha.f = 0.7)[iline]
)
}
# add points on top of intervals
points(yr[subset] + adj, obs[subset],
pch = 21, cex = 1.5, col = 1,
bg = adjustcolor(col, alpha.f = 0.7)[iline]
)
}
} else {
# plot only the first model
imodel <- models[which(endyrvec == max(endyrvec))[1]]
subset <- indices2[["imodel"]] == imodel & !is.na(indices2[["Like"]])
# add uncertainty intervals if requested
if (indexUncertainty) {
arrows(
x0 = yr[subset], y0 = lower[subset],
x1 = yr[subset], y1 = upper[subset],
length = 0.01, angle = 90, code = 3, col = 1
)
}
# add points on top of intervals
points(yr[subset], obs[subset], pch = 16, cex = 1.5)
}
# if not added to existing plot then add axis labels and box
if (!add) {
xticks <- pretty(xlim)
axis(1, at = xticks, labels = format(xticks))
if (tickEndYr) {
axis(1, at = max(endyrvec))
}
axis(2)
box()
}
# return upper y-limit
return(ylim[2])
} # end plotIndices function
plotDensities <- function(parname, xlab, denslwd, limit0 = TRUE,
cumulative = FALSE) {
if (any(!mcmcVec)) {
vals <- rbind(
pars[pars[["Label"]] == parname, names(pars) != "recdev"],
quants[quants[["Label"]] == parname, ]
)
if (nrow(vals) != 1) {
warn <- paste("problem getting values for parameter:", parname, "")
if (nrow(vals) == 0) {
warn <- paste(
warn,
"no Labels match in either parameters or derived quantities"
)
}
if (nrow(vals) > 0) {
warn <- paste(
warn,
"Too many matching Labels:",
pars[["Label"]][pars[["Label"]] == parname],
quants[["Label"]][quants[["Label"]] == parname]
)
}
warning(warn)
# previous versions had an else statement,
# but this will end the function here instead and saves indenting
return(NULL)
}
valSDs <- rbind(
parsSD[pars[["Label"]] == parname, ],
quantsSD[quants[["Label"]] == parname, ]
)
}
xmax <- xmin <- ymax <- NULL # placeholder for limits
# placeholder for the mcmc density estimates, if there are any
mcmcDens <- vector(mode = "list", length = nlines)
# loop over models to set range
good <- rep(TRUE, nlines) # indicator of which values to plot
for (iline in 1:nlines) {
imodel <- models[iline]
if (mcmcVec[iline]) {
# figure out which columns of posteriors to use
mcmcColumn <- grep(parname, colnames(mcmc[[imodel]]), fixed = TRUE)
# warn if it can't find the columns
if (length(mcmcColumn) == 0) {
message(
"No columns selected from MCMC for '", parname,
"' in model ", imodel
)
good[iline] <- FALSE
}
# warn if too many columns
if (length(mcmcColumn) > 1) {
warning(
"Too many columns selected from MCMC for model ",
imodel, ":", paste0(names(mcmc[[imodel]])[mcmcColumn],
collapse = ", "
),
". Please specify a unique label in the mcmc dataframe",
"or specify mcmcVec = FALSE for model ",
imodel, " (or mcmcVec = FALSE applying to all models). "
)
good[iline] <- FALSE
}
# add density
if (good[iline]) {
mcmcVals <- mcmc[[imodel]][, mcmcColumn]
xmin <- min(xmin, quantile(mcmcVals, 0.005, na.rm = TRUE))
if (limit0) {
xmin <- max(0, xmin) # by default no plot can go below 0
}
if (fix0 & !grepl("R0", parname)) {
xmin <- 0 # include 0 if requested (except for log(R0) plots)
}
xmax <- max(xmax, quantile(mcmcVals, 0.995, na.rm = TRUE))
# density estimate of mcmc sample (posterior)
z <- density(mcmcVals, cut = 0, adjust = densityadjust)
z[["x"]] <- z[["x"]][c(1, 1:length(z[["x"]]), length(z[["x"]]))]
# just to make sure that a good looking polygon is created
z[["y"]] <- c(0, z[["y"]], 0)
ymax <- max(ymax, max(z[["y"]])) # update ymax
mcmcDens[[iline]] <- z # save density estimate for later plotting
}
} else {
parval <- vals[1, imodel]
parSD <- valSDs[1, imodel]
if (!is.numeric(parval)) parval <- -1 # do this in case models added without the parameter
if (!is.na(parSD) && parSD > 0) { # if non-zero SD available
# update x range
xmin <- min(xmin, qnorm(0.005, parval, parSD))
if (limit0) xmin <- max(0, xmin) # by default no plot can go below 0
if (fix0 & !grepl("R0", parname)) xmin <- 0 # include 0 if requested (except for log(R0) plots)
xmax <- max(xmax, qnorm(0.995, parval, parSD))
# calculate density to get y range
x <- seq(xmin, xmax, length = 500)
mle <- dnorm(x, parval, parSD)
mlescale <- 1 / (sum(mle) * mean(diff(x)))
mle <- mle * mlescale
# update ymax
ymax <- max(ymax, max(mle))
} else { # if no SD, at least make sure interval includes MLE estimate
xmin <- min(xmin, parval)
xmax <- max(xmax, parval)
}
}
}
if (grepl("Bratio", parname)) {
xmin <- 0 # xmin=0 for relative spawning biomass plots
}
if (limit0) {
xmin <- max(0, xmin) # by default no plot can go below 0
}
if (fix0 & !grepl("R0", parname)) {
xmin <- 0 # include 0 if requested (except for log(R0) plots)
}
# calculate x-limits and vector of values for densities
xlim <- c(xmin, xmin + (xmax - xmin) * densityscalex)
x <- seq(xmin, xmax, length = 500)
# calculate some scaling stuff
xunits <- 1
if (rescale & xmax > 1e3 & xmax < 3e6) {
xunits <- 1e3
# xlab <- gsub("mt","x1000 mt",xlab)
xlab2 <- "'1000 t"
}
if (rescale & xmax > 3e6) {
xunits <- 1e6
# xlab <- gsub("mt","million mt",xlab)
xlab2 <- "million t"
}
# make empty plot
if (is.null(ymax)) {
message(
" skipping plot of ", parname,
" because it seems to not be estimated in any model"
)
} else {
par(par)
if (!add) {
if (cumulative) {
plot(0,
type = "n", xlim = xlim, axes = FALSE, xaxs = "i", yaxs = yaxs,
ylim = c(0, 1), xlab = xlab, ylab = ""
)
} else {
plot(0,
type = "n", xlim = xlim, axes = FALSE, xaxs = "i", yaxs = yaxs,
ylim = c(0, 1.1 * ymax * densityscaley), xlab = xlab, ylab = ""
)
}
}
# add vertical lines for target and threshold
# relative spawning biomass values
if (grepl("Bratio", parname)) {
if (btarg > 0) {
abline(v = btarg, col = "red", lty = 2)
text(btarg + 0.03, par()$usr[4], labels[10], adj = 1.05, srt = 90)
}
if (minbthresh > 0) {
abline(v = minbthresh, col = "red", lty = 2)
text(minbthresh + 0.03, par()$usr[4], labels[11],
adj = 1.05, srt = 90
)
}
}
symbolsQuants <- c(0.025, 0.125, 0.25, 0.5, 0.75, 0.875, 0.975)
# loop again to make plots
for (iline in (1:nlines)[good]) {
imodel <- models[iline]
if (mcmcVec[iline]) {
# make density for MCMC posterior
mcmcColumn <- grep(parname, colnames(mcmc[[imodel]]), fixed = TRUE)
mcmcVals <- mcmc[[imodel]][, mcmcColumn]
# for symbols on plot
x2 <- quantile(mcmcVals, symbolsQuants, na.rm = TRUE)
# find the positions in the density closest to these quantiles
x <- mcmcDens[[iline]][["x"]]
if (!cumulative) {
y <- mcmcDens[[iline]][["y"]]
yscale <- 1 / (sum(y) * mean(diff(x)))
y <- y * yscale
} else {
y <- cumsum(mcmcDens[[iline]][["y"]]) / sum(mcmcDens[[iline]][["y"]])
}
y2 <- NULL
for (ii in x2) {
# find y-value associated with closest matching x-value
# "min" was added for rare case where two values are equally close
y2 <- c(y2, min(y[abs(x - ii) == min(abs(x - ii))]))
}
# make shaded polygon
if (!cumulative) {
polygon(c(x[1], x, rev(x)[1]), c(0, y, 0),
col = shadecol[iline],
border = NA
)
} else {
# polygon for cumulative has extra point in bottom right
polygon(c(x[1], x, rev(x)[c(1, 1)]), c(0, y, 1, 0),
col = shadecol[iline], border = NA
)
}
# add thicker line
lines(x, y, col = col[iline], lwd = 2)
# add points on line and vertical line at median (hopefully)
if (!cumulative) {
if (densitysymbols) {
points(x2, y2, col = col[iline], pch = pch[iline])
}
# really hokey and assumes that the middle value of
# the vector of quantiles is the median
lines(rep(x2[median(1:length(x2))], 2),
c(0, y2[median(1:length(x2))]),
col = col[iline]
)
} else {
if (densitysymbols) {
points(x2, symbolsQuants, col = col[iline], pch = pch[iline])
}
lines(rep(median(mcmcVals), 2), c(0, 0.5), col = col[iline])
}
} else {
# make normal density for MLE
parval <- vals[1, imodel]
parSD <- valSDs[1, imodel]
if (!is.na(parSD) && parSD > 0) {
xmin <- min(xmin, qnorm(0.005, parval, parSD))
if (limit0) {
xmin <- max(0, xmin) # by default no plot can go below 0
}
if (fix0 & !grepl("R0", parname)) {
xmin <- 0 # include 0 if requested (except for log(R0) plots)
}
x <- seq(xmin, max(xmax, xlim), length = 500)
# x2 <- parval+(-2:2)*parSD # 1 and 2 SDs away from mean to plot symbols
x2 <- qnorm(symbolsQuants, parval, parSD)
if (cumulative) {
y <- mle <- pnorm(x, parval, parSD) # smooth line
y2 <- mle2 <- pnorm(x2, parval, parSD) # symbols
} else {
mle <- dnorm(x, parval, parSD) # smooth line
mle2 <- dnorm(x2, parval, parSD) # symbols
mlescale <- 1 / (sum(mle) * mean(diff(x)))
y <- mle <- mle * mlescale
y2 <- mle2 <- mle2 * mlescale
}
# add shaded polygons
polygon(c(x[1], x, rev(x)[1]), c(0, mle, 0),
col = shadecol[iline], border = NA
)
lines(x, mle, col = col[iline], lwd = 2)
if (!cumulative) {
if (densitysymbols) {
points(x2, mle2, col = col[iline], pch = pch[iline])
}
lines(rep(parval, 2),
c(0, dnorm(parval, parval, parSD) * mlescale),
col = col[iline], lwd = denslwd
)
} else {
if (densitysymbols) {
points(x2, symbolsQuants, col = col[iline], pch = pch[iline])
}
lines(rep(parval, 2),
c(0, 0.5),
col = col[iline], lwd = denslwd
)
}
} else {
# add vertical line for estimate of no density can be added
abline(v = parval, col = col[iline], lwd = denslwd)
}
}
# should be able to move more stuff into this section
# that applies to both MLE and MCMC
if (densitytails & densitymiddle) {
warning(
"You are shading both tails and central 95% of density plots",
"which is illogical"
)
}
doShade <- FALSE
if (mcmcVec[iline]) {
doShade <- TRUE
} else {
if (!is.na(parSD) && parSD > 0) {
doShade <- TRUE
}
}
if (densitytails & doShade) {
# figure out which points are in the tails of the distibutions
x.lower <- x[x <= x2[1]]
y.lower <- y[x <= x2[1]]
x.upper <- x[x >= rev(x2)[1]]
y.upper <- y[x >= rev(x2)[1]]
# add darker shading for tails
polygon(c(x.lower[1], x.lower, rev(x.lower)[1]),
c(0, y.lower, 0),
col = shadecol[iline], border = NA
)
polygon(c(x.upper[1], x.upper, rev(x.upper)[1]),
c(0, y.upper, 0),
col = shadecol[iline], border = NA
)
}
if (densitymiddle & doShade) { # } & !is.na(parSD) && parSD>0){
x.middle <- x[x >= x2[1] & x <= rev(x2)[1]]
y.middle <- y[x >= x2[1] & x <= rev(x2)[1]]
polygon(c(x.middle[1], x.middle, rev(x.middle)[1]),
c(0, y.middle, 0),
col = shadecol[iline], border = NA
)
}
}
# add axes and labels
if (!add) {
abline(h = 0, col = "grey")
xticks <- pretty(xlim)
axis(1, at = xticks, labels = format(xticks / xunits))
theLine <- par()$mgp[1]
if (cumulative) {
axis(2,
at = symbolsQuants, labels = format(symbolsQuants),
cex.axis = 0.9
)
mtext(
side = 2, line = theLine,
text = "Cumulative Probability",
col = par()$col.lab, cex = par()$cex.lab
)
} else {
mtext(
side = 2, line = theLine, text = labels[9],
col = par()$col.lab, cex = par()$cex.lab
)
}
box()
}
if (xunits != 1) {
message(
"x-axis for ", parname, " in density plot has been divided by ",
xunits, " (so may be in units of ", xlab2, ")"
)
}
# add legend
if (legend) {
add_legend(legendlabels,
# override legend location for cumulative plots
# where topleft should always work best
legendloc = ifelse(cumulative, "topleft", legendloc),
legendorder = legendorder,
legendncol = legendncol,
col = col,
pch = pch,
lwd = lwd,
lty = lty
)
}
}
# in the future, this could return the upper y-limit,
# currently there's no control over ylim in these plots
return(NA)
} # end plotDensities function
uncertaintyplots <- intersect(c(2, 4, 6, 8, 10, 12), subplots)
if (!any(uncertainty) & length(uncertaintyplots) > 0) {
# warn if uncertainty is off but uncertainty plots are requested
message(
"skipping plots with uncertainty:",
paste(uncertaintyplots, collapse = ",")
)
}
# subplot 1: spawning biomass
if (1 %in% subplots) {
if (verbose) {
message("subplot 1: spawning biomass")
}
if (plot) {
ymax_vec[1] <- plotSpawnBio(show_uncertainty = FALSE)
}
if (print) {
save_png_comparisons("compare1_spawnbio.png")
ymax_vec[1] <- plotSpawnBio(show_uncertainty = FALSE)
dev.off()
}
}
# subplot 2: spawning biomass with uncertainty intervals
if (2 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplot 2: spawning biomass with uncertainty intervals")
}
if (plot) {
ymax_vec[2] <- plotSpawnBio(show_uncertainty = TRUE)
}
if (print) {
save_png_comparisons("compare2_spawnbio_uncertainty.png")
ymax_vec[2] <- plotSpawnBio(show_uncertainty = TRUE)
dev.off()
}
}
}
# subplot 3: biomass ratio
# (hopefully equal to spawning relative spawning biomass)
if (3 %in% subplots) {
if (verbose) {
message("subplot 3: biomass ratio (hopefully equal to fraction of unfished)")
}
if (plot) {
ymax_vec[3] <- plotBratio(show_uncertainty = FALSE)
}
if (print) {
save_png_comparisons("compare3_Bratio.png")
ymax_vec[3] <- plotBratio(show_uncertainty = FALSE)
dev.off()
}
}
# subplot 4: biomass ratio with uncertainty
if (4 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplot 4: biomass ratio with uncertainty")
}
if (plot) {
ymax_vec[4] <- plotBratio(show_uncertainty = TRUE)
}
if (print) {
save_png_comparisons("compare4_Bratio_uncertainty.png")
ymax_vec[4] <- plotBratio(show_uncertainty = TRUE)
dev.off()
}
}
}
# subplot 5: SPR ratio
if (5 %in% subplots) {
if (verbose) {
message("subplot 5: SPR ratio")
}
if (plot) {
ymax_vec[5] <- plotSPRratio(show_uncertainty = FALSE)
}
if (print) {
save_png_comparisons("compare5_SPRratio.png")
ymax_vec[5] <- plotSPRratio(show_uncertainty = FALSE)
dev.off()
}
}
# subplot 6: SPR ratio with uncertainty
if (6 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplot 6: SPR ratio with uncertainty")
}
if (plot) {
ymax_vec[6] <- plotSPRratio(show_uncertainty = TRUE)
}
if (print) {
save_png_comparisons("compare6_SPRratio_uncertainty.png")
ymax_vec[6] <- plotSPRratio(show_uncertainty = TRUE)
dev.off()
}
}
}
# subplot 7: F (harvest rate or fishing mortality, however defined)
if (7 %in% subplots) {
if (verbose) {
message("subplot 7: F value")
}
if (plot) {
ymax_vec[7] <- plotF(show_uncertainty = FALSE)
}
if (print) {
save_png_comparisons("compare7_Fvalue.png")
ymax_vec[7] <- plotF(show_uncertainty = FALSE)
dev.off()
}
}
# subplot 8: F (harvest rate or fishing mortality, however defined)
# with uncertainty
if (8 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplot 8: F value with uncertainty")
}
if (plot) {
ymax_vec[8] <- plotF(show_uncertainty = TRUE)
}
if (print) {
save_png_comparisons("compare8_Fvalue_uncertainty.png")
ymax_vec[8] <- plotF(show_uncertainty = TRUE)
dev.off()
}
}
}
# subplot 9: recruits
if (9 %in% subplots) {
if (verbose) {
message("subplot 9: recruits")
}
if (plot) {
ymax_vec[9] <- plotRecruits(show_uncertainty = FALSE)
}
if (print) {
save_png_comparisons("compare9_recruits.png")
ymax_vec[9] <- plotRecruits(show_uncertainty = FALSE)
dev.off()
}
}
# subplot 10: recruits with uncertainty
if (10 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplot 10: recruits with uncertainty")
}
if (plot) {
ymax_vec[10] <- plotRecruits()
}
if (print) {
save_png_comparisons("compare10_recruits_uncertainty.png")
ymax_vec[10] <- plotRecruits()
dev.off()
}
}
}
# subplot 11: recruit devs
if (11 %in% subplots) {
if (verbose) message("subplot 11: recruit devs")
if (is.null(recdevs)) {
message("No recdevs present in the model summary, skipping plot.")
} else {
if (plot) {
ymax_vec[11] <- plotRecDevs(show_uncertainty = FALSE)
}
if (print) {
save_png_comparisons("compare11_recdevs.png")
ymax_vec[11] <- plotRecDevs(show_uncertainty = FALSE)
dev.off()
}
}
}
# subplot 12: recruit devs with uncertainty
if (12 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplot 12: recruit devs with uncertainty")
}
if (plot) {
ymax_vec[12] <- plotRecDevs()
}
if (print) {
save_png_comparisons("compare12_recdevs_uncertainty.png")
ymax_vec[12] <- plotRecDevs()
dev.off()
}
}
}
# subplot 13: index fits
if (13 %in% subplots & !is.null(indices) && nrow(indices) > 0) {
if (verbose) {
message("subplot 13: index fits")
}
for (iindex in 1:length(indexfleets[[1]])) {
if (plot) {
ymax_vec[13] <- plotIndices(log = FALSE, iindex = iindex)
}
if (print) {
save_png_comparisons(paste0(
"compare13_indices",
index_plot_suffix[iindex],
".png"
))
ymax_vec[13] <- plotIndices(log = FALSE, iindex = iindex)
dev.off()
}
} # end loop over indices to plot
} # end check for subplot 13
# subplot 14: index fits on a log scale
if (14 %in% subplots & !is.null(indices) && nrow(indices) > 0) {
if (verbose) {
message("subplot 14: index fits on a log scale")
}
for (iindex in 1:length(indexfleets[[1]])) {
if (plot) {
ymax_vec[14] <- plotIndices(log = TRUE, iindex = iindex)
}
if (print) {
save_png_comparisons(paste0(
"compare14_indices_log",
index_plot_suffix[iindex],
".png"
))
ymax_vec[14] <- plotIndices(log = TRUE, iindex = iindex)
dev.off()
}
} # end loop over indices to plot
} # end check for subplot 14
#### unfinished addition of phase plot comparisons
## # subplot 15: phase plot
if (15 %in% subplots) {
if (verbose) {
message("subplot 15: phase plot")
}
if (plot) {
ymax_vec[15] <- plotPhase()
}
if (print) {
save_png_comparisons("compare15_phase_plot.png")
ymax_vec[15] <- plotPhase()
dev.off()
}
}
# subplot 16 and 17: densities, and cumulative probability plots
if (16 %in% subplots | 17 %in% subplots) {
if (any(uncertainty)) {
if (verbose) {
message("subplots 16 and 17: densities")
}
# look for all parameters or derived quantities matching
# the input list of names
expandednames <- NULL
for (i in 1:length(densitynames)) {
matchingnames <- c(
pars[["Label"]],
quants[["Label"]]
)[grep(densitynames[i],
c(pars[["Label"]], quants[["Label"]]),
fixed = TRUE
)]
expandednames <- c(expandednames, matchingnames)
}
if (length(expandednames) == 0) {
warning("No parameter/quantity names matching 'densitynames' input.")
} else {
message(
"Parameter/quantity names matching 'densitynames' input:\n",
paste0(expandednames, collapse = ", ")
)
ndensities <- length(expandednames)
# make a table to store associated x-labels
densitytable <- data.frame(
name = expandednames,
label = expandednames,
stringsAsFactors = FALSE
)
if (!is.null(densityxlabs) && length(densityxlabs) == ndensities) {
densitytable[["label"]] <- densityxlabs
message(
" table of parameter/quantity labels with associated",
" x-axis label:"
)
print(densitytable)
} else {
if (!is.null(densityxlabs)) {
warning(
"length of 'densityxlabs' doesn't match the number of values ",
"matching 'densitynames' so parameter labels will be used instead"
)
}
}
# loop over parameters for densitities
if (16 %in% subplots) {
for (iplot in 1:ndensities) {
# find matching parameter
name <- densitytable[iplot, 1]
xlab <- densitytable[iplot, 2]
# if(verbose) message(" quantity name=",name,"\n",sep="")
if (plot) {
ymax_vec[16] <- plotDensities(
parname = name, xlab = xlab,
denslwd = densitylwd
)
}
if (print) {
save_png_comparisons(paste("compare16_densities_", name, ".png", sep = ""))
ymax_vec[16] <- plotDensities(
parname = name, xlab = xlab,
denslwd = densitylwd
)
dev.off()
}
}
}
# loop again for cumulative densities
if (17 %in% subplots) {
for (iplot in 1:ndensities) {
# find matching parameter
name <- densitytable[iplot, 1]
xlab <- densitytable[iplot, 2]
# if(verbose) message(" quantity name=",name,"\n",sep="")
if (plot) {
ymax_vec[17] <- plotDensities(
parname = name, xlab = xlab,
denslwd = densitylwd,
cumulative = TRUE
)
}
if (print) {
save_png_comparisons(paste("compare17_densities_", name, ".png", sep = ""))
ymax_vec[17] <- plotDensities(
parname = name, xlab = xlab,
denslwd = densitylwd,
cumulative = TRUE
)
dev.off()
}
}
}
}
}
}
#### unfinished addition of growth comparisons
## # subplot 19: growth, females
## if(19 %in% subplots){
## if(verbose) message("subplot 19: growth, females\n")
## if(plot) plotgrowth(sex='f')
## if(print){
## save_png_comparisons("compare19_growth_females.png")
## plotgrowth(sex='f')
## dev.off()
## }
## }
## # subplot 20: growth, males
## if(20 %in% subplots){
## if(verbose) message("subplot 20: growth, males\n")
## if(plot) plotgrowth(sex='m')
## if(print){
## save_png_comparisons("compare20_growth_males.png")
## plotgrowth(sex='m')
## dev.off()
## }
## }
if (pdf) dev.off()
return(invisible(ymax_vec))
}
|
library(dplyr)
library(tidyr)

# run_analysis.R -- build a tidy summary of the UCI HAR ("Samsung") data set:
# label the train/test splits with subject and activity, keep only the
# mean/std measurements, and average each measurement per (subject, activity).

# Create a vector of feature (measurement column) names using the features.txt file
features <- read.csv("features.txt", sep = "", header = FALSE, col.names = c("code", "feature"))[, c("feature")]
# Create a lookup data frame mapping activity codes to activity names
activities <- read.csv("activity_labels.txt", sep = "", header = FALSE, col.names = c("code", "activity"))

# Read in the train data (one row per observation window, one column per feature)
train <- read.csv("X_train.txt", sep = "", header = FALSE, col.names = features)
# Read in the train activity codes and translate them to activity names.
# match() preserves the file's row order; the previous merge() silently
# sorted the rows by code and misaligned activities with the measurement rows.
train_activities <- read.csv("y_train.txt", sep = "", header = FALSE, col.names = c("code"))
train_activities$activity <- activities$activity[match(train_activities$code, activities$code)]
# Read in train subjects
train_subjects <- read.csv("subject_train.txt", sep = "", header = FALSE, col.names = c("subject"))
# Keep only the mean and standard-deviation measurement columns
train <- train[, grepl("mean|std", names(train), ignore.case = TRUE)]
# Bind the train subjects and activities to the train features
train <- cbind(subject = train_subjects[, "subject"], activity = train_activities[, "activity"],
               train)

# Read in and label the test data (same steps as for the train split)
test <- read.csv("X_test.txt", sep = "", header = FALSE, col.names = features)
test_activities <- read.csv("y_test.txt", sep = "", header = FALSE, col.names = c("code"))
test_activities$activity <- activities$activity[match(test_activities$code, activities$code)]
test_subjects <- read.csv("subject_test.txt", sep = "", header = FALSE, col.names = c("subject"))
test <- test[, grepl("mean|std", names(test), ignore.case = TRUE)]
test <- cbind(subject = test_subjects[, "subject"], activity = test_activities[, "activity"],
              test)

# Combine the train and test data sets
total <- rbind(train, test)

# Tidy data set: the mean of every retained feature for each subject/activity
# pair.  across() replaces the deprecated summarise_each(funs(mean)) idiom.
tidy <- total %>%
  group_by(subject, activity) %>%
  summarise(across(everything(), mean), .groups = "drop")
| /run_analysis.R | no_license | jthoburn/Getting-and-Cleaning-Data | R | false | false | 2,157 | r | library(dplyr)
# run_analysis.R -- builds a tidy summary of the UCI HAR ("Samsung") data set:
# label the train/test splits with subject and activity, keep only the
# mean/std measurements, and average each measurement per (subject, activity).
library(tidyr)
# Create a vector of feature (measurement column) names using the features.txt file
features <- read.csv("features.txt", sep = "", header = FALSE, col.names = c("code", "feature"))[, c("feature")]
# Create a lookup data frame mapping activity codes to activity names
activities <- read.csv("activity_labels.txt", sep = "", header = FALSE, col.names = c("code", "activity"))
# Read in the train data (one row per observation window, one column per feature)
train <- read.csv("X_train.txt", sep = "", header = FALSE, col.names = features)
# Read in train activity code and merge it with the activities data frame.
# NOTE(review): merge() sorts the result by "code", which re-orders the rows;
# confirm the activity labels still line up row-for-row with X_train
# (match() on the code column would preserve the original file order).
train_activities <- read.csv("y_train.txt", sep = "", header = FALSE, col.names = c("code"))
train_activities <- merge(activities, train_activities, by = c("code"))
# Read in train subjects
train_subjects <- read.csv("subject_train.txt", sep = "", header = FALSE, col.names = c("subject"))
# Keep only the mean and standard-deviation measurement columns
train <- train[, grepl("mean|std", names(train), ignore.case = TRUE)]
# Bind the train subjects and activities to the train features
train <- cbind(subject = train_subjects[,c("subject")], activity = train_activities[,c("activity")],
               train)
# Read in the test data
test <- read.csv("X_test.txt", sep = "", header = FALSE, col.names = features)
# Read in test activity code and merge it with the activities data frame.
# NOTE(review): same merge() re-ordering caveat as for the train split above.
test_activities <- read.csv("y_test.txt", sep = "", header = FALSE, col.names = c("code"))
test_activities <- merge(activities, test_activities, by = c("code"))
# Read in test subjects
test_subjects <- read.csv("subject_test.txt", sep = "", header = FALSE, col.names = c("subject"))
# Keep only the mean and standard-deviation measurement columns
test <- test[, grepl("mean|std", names(test), ignore.case = TRUE)]
# Bind the test subjects and activities to the test features
test <- cbind(subject = test_subjects[,c("subject")], activity = test_activities[,c("activity")],
              test)
# Combine the train and test data sets
total <- rbind(train, test)
# Tidy data set: the mean of every retained feature per (subject, activity).
# NOTE(review): summarise_each()/funs() are deprecated in current dplyr;
# summarise(across(everything(), mean)) is the modern equivalent.
tidy <- total %>% group_by(subject, activity) %>% summarise_each(funs(mean))
|
library(tidyverse)
library(fuzzyjoin)

# combine_audio_and_bird_list.R -- join the TRFO bird list to NACC taxonomy
# and attach the matching audio file path for each species.

## Exploratory checks -------------------------------------------------------

# How are the scrub-jays (Aphelocoma) named in the AOU checklist?
read_csv("AOUlist.csv", skip = 1) %>%
  select(`Scientific Name`, `Common Name`, Order, Family) %>%
  filter(str_detect(`Scientific Name`, "Aphelocoma"))

# Which TRFO species fail to match the AOU list by common name?
read_csv("TRFO_bird_list.csv") %>%
  select(-`General Grouping`) %>%
  left_join(
    read_csv("AOUlist.csv", skip = 1) %>%
      select(`Scientific Name`, `Common Name`, Order, Family),
    by = "Common Name"
  ) %>%
  filter(is.na(`Scientific Name`))

# How are the "Sage" species named in the NACC list?
read_csv("data/NACC_list_species.csv") %>%
  filter(str_detect(common_name, "Sage"))

## Build the bird list ------------------------------------------------------

# TRFO species joined to NACC taxonomy, trimmed to common name plus the
# order:species taxonomy columns.
bird_list <- read_csv("data/TRFO_bird_list.csv") %>%
  select(-`General Grouping`) %>%
  left_join(
    read_csv("data/NACC_list_species.csv"),
    by = c("Common Name" = "common_name")
  ) %>%
  select(-french_name, -subfamily) %>%
  select(`Common Name`, order:species) %>%
  rename(common_name = `Common Name`)

## Audio files ---------------------------------------------------------------

# One row per audio file: a cleaned species name derived from the file name
# plus the full path to the file.  (A broken draft that piped list.files()
# into bind_cols() -- passing the list.files *function* as an argument, which
# errors and halts the script -- was removed in favor of this tibble() call.)
files <- tibble(
  file_name = list.files("bird_songs_of_the_rockies"),
  audio_file_path = list.files("bird_songs_of_the_rockies", full.names = TRUE)
) %>%
  # Strip the extension and any characters other than letters, hyphens,
  # apostrophes and spaces (track numbers, underscores, etc.).
  mutate(file_name = gsub("[^-A-Za-z' ]", "", tools::file_path_sans_ext(file_name))) %>%
  mutate(file_name = str_trim(file_name)) %>%
  rowwise() %>%
  # Files named "Something - Species" carry the species after the dash;
  # swap the two halves so the species name comes first.
  mutate(file_name = ifelse(str_detect(file_name, " - "),
    paste(strsplit(file_name, " - ")[[1]][2], strsplit(file_name, " - ")[[1]][1], sep = " "),
    file_name
  )) %>%
  # Fix misspellings and out-of-date common names so they match the NACC list.
  mutate(file_name = case_when(
    file_name == "Black-Billed Magpie" ~ "Black-billed Magpie",
    file_name == "Black-Capped Chickadee" ~ "Black-capped Chickadee",
    file_name == "Common Night Hawk" ~ "Common Nighthawk",
    file_name == "Downey Woodpecker" ~ "Downy Woodpecker",
    file_name == "Green-Tailed Towhee" ~ "Green-tailed Towhee",
    file_name == "Sage Sparrow" ~ "Sagebrush Sparrow",
    file_name == "Western Scrub Jay" ~ "Woodhouse's Scrub-Jay",
    file_name == "White-Throated Swift" ~ "White-throated Swift",
    TRUE ~ file_name
  ))

# Spot-check the swift entries after cleaning.
files %>%
  filter(str_detect(file_name, "Swift"))
# Inspect the full table interactively only (no-op in scripted runs).
if (interactive()) View(files)

## Output --------------------------------------------------------------------

# Attach the audio file path to each species and write the final list.
bird_list %>%
  left_join(files, by = c("common_name" = "file_name")) %>%
  write_csv("cleaned_list_with_filenames.csv")
| /R/combine_audio_and_bird_list.R | no_license | mschmidty/bird_id_trainer | R | false | false | 2,229 | r | library(tidyverse)
# combine_audio_and_bird_list.R -- joins the TRFO bird list to NACC taxonomy
# and attaches the matching audio file path for each species.
library(fuzzyjoin)
# Exploratory check: how are the scrub-jays (Aphelocoma) named in the AOU list?
read_csv("AOUlist.csv", skip=1)%>%
  select(`Scientific Name`, `Common Name`, Order, Family)%>%
  filter(str_detect(`Scientific Name`, "Aphelocoma"))
# Exploratory check: which TRFO species fail to match the AOU list by common name?
read_csv("TRFO_bird_list.csv")%>%
  select(-`General Grouping`)%>%
  left_join(
    read_csv("AOUlist.csv", skip=1)%>%
    select(`Scientific Name`, `Common Name`, Order, Family),
    by = "Common Name"
  )%>%
  filter(is.na(`Scientific Name`))
# Exploratory check: how are the "Sage" species named in the NACC list?
read_csv("data/NACC_list_species.csv")%>%
  filter(str_detect(common_name, "Sage"))
# TRFO species joined to NACC taxonomy, trimmed to common name plus the
# order:species taxonomy columns.
bird_list<-read_csv("data/TRFO_bird_list.csv")%>%
  select(-`General Grouping`)%>%
  left_join(
    read_csv("data/NACC_list_species.csv"),
    by = c("Common Name" = "common_name")
  )%>%
  select(-french_name, -subfamily)%>%
  select(`Common Name`, order:species)%>%
  rename(common_name = `Common Name`)
# NOTE(review): the next two lines pass the list.files *function* itself to
# bind_cols and will error when run; this looks like an abandoned draft of
# the tibble() construction just below -- confirm and remove.
list.files("bird_songs_of_the_rockies")%>%
  bind_cols(list.files, "bird_songs_of_the_rockies", full.names = T)
# One row per audio file: a cleaned species name derived from the file name
# plus the full path to the file.
# NOTE(review): full.names = T relies on the reassignable alias T; TRUE is safer.
files<-tibble(
  file_name = list.files("bird_songs_of_the_rockies"),
  audio_file_path = list.files("bird_songs_of_the_rockies", full.names = T)
)%>%
  # Strip the extension and any characters other than letters, hyphens,
  # apostrophes and spaces (track numbers, underscores, etc.).
  mutate(file_name = gsub("[^-A-Za-z' ]","", tools::file_path_sans_ext(file_name)))%>%
  mutate(file_name = str_trim(file_name))%>%
  rowwise()%>%
  # Files named "Something - Species" carry the species after the dash;
  # swap the two halves so the species name comes first.
  mutate(file_name = ifelse(str_detect(file_name, " - "),
    paste(strsplit(file_name, " - ")[[1]][2], strsplit(file_name, " - ")[[1]][1], sep = " "),
    file_name)
  )%>%
  # Fix misspellings and out-of-date common names so they match the NACC list.
  mutate(file_name =case_when(
    file_name == "Black-Billed Magpie" ~ "Black-billed Magpie",
    file_name == "Black-Capped Chickadee" ~ "Black-capped Chickadee",
    file_name == "Common Night Hawk" ~ "Common Nighthawk",
    file_name == "Downey Woodpecker" ~ "Downy Woodpecker",
    file_name == "Green-Tailed Towhee" ~ "Green-tailed Towhee",
    file_name == "Sage Sparrow" ~ "Sagebrush Sparrow",
    file_name == "Western Scrub Jay" ~ "Woodhouse's Scrub-Jay",
    file_name == "White-Throated Swift" ~ "White-throated Swift",
    TRUE ~ file_name
  ))
# Spot-check the swift entries after cleaning.
files%>%
  filter(str_detect(file_name, "Swift"))
# NOTE(review): View() is interactive-only; consider guarding with interactive().
files%>%View()
# Attach the audio file path to each species and write the final list.
bird_list%>%
  left_join(files, by = c("common_name" = "file_name"))%>%
  write_csv("cleaned_list_with_filenames.csv")
|
### Prepare the environment
# Set the session timezone to UTC so xts date-range subsetting behaves the
# same regardless of the host machine's locale.
Sys.setenv(TZ='UTC')
# Load the required libraries
library(stats)          # ts()/decompose() for seasonal adjustment
library(xts)            # time-series containers and date subsetting
library(vRODBC)         # ODBC connectivity to HP Vertica
library(distributedR)   # HP Distributed R cluster runtime
library(Metrics)        # rmse()
library(HPdclassifier)  # hpdrandomForest()
library(HPdregression)  # hpdglm()
library(e1071)          # svm() -- single-node support vector regression
# Start Distributed R, ignoring the error raised if a cluster is already up.
# NOTE(review): this matches on the error message text, which is fragile if
# the distributedR package ever rewords it -- confirm against the installed
# version.
tryCatch(
  {
    distributedR_start()
  }, error= function(e) {
    if (!grepl('is already running', e$message)) stop(e)
  }
)
# Connect to the database via the "VerticaGasDSN" DSN configured in odbc.ini
con <- odbcConnect("VerticaGasDSN")
# Build the SQL query that returns one monthly closing value per month for
# the given Vertica table.
#
# table_name: name of a table whose first column is a date; the second
#             column (detected from table metadata below) holds the value.
# Returns a SQL string producing two columns: yearmon (xts "yearmon"
# encoding, year + (month-1)/12) and closing (the month's last observed
# value).
# NOTE: reads the global connection `con`.  The table name is pasted
# directly into the SQL -- acceptable for these hard-coded internal tables,
# but unsafe for untrusted input.
get_query <- function(table_name) {
  # The query uses Vertica's FIRST_VALUE analytic function to get the closing value for the
  # month regardless of whether the table holds monthly, weekly or daily values. Vertica is
  # *very* efficient at this kind of work and saves us from loading unnecessarily
  # fine-grained data into R's memory.
  # Detect the data column name from the table meta-data (zero-row select).
  data_col <- names(sqlQuery(con, paste('select * from', table_name, 'limit 0')))[2]
  # The FIRST_VALUE window is ordered by date DESC because we want the closing value;
  # we'd order ASC if we were interested in the opening value instead.
  sub_query <- paste("select year(date) as y, month(date) as m,",
                     "first_value(", data_col, ") over(partition by year(date), month(date) order by date desc) closing",
                     "from", table_name)
  # The data is not grouped, so wrap the query to group by year and month, returning
  # the (year, month) pair in the xts "yearmon" representation (year + (month_number-1)/12).
  query <- paste("select y + (m-1)/12 as yearmon, max(closing) as closing from",
                 "(", sub_query, ") as subq",
                 "group by 1 order by 1")
  return(query)
}
# Load one monthly closing-value series from Vertica into an xts object.
# Zero readings are treated as missing and linearly interpolated.
#
# con:        an open vRODBC connection.
# table_name: the Vertica table to read (see get_query()).
# Returns an xts series indexed by yearmon with gaps interpolated.
load_data <- function(con, table_name) {
  raw <- sqlQuery(con, get_query(table_name))
  # A closing value of exactly zero means "no observation": flag it as NA
  # so it gets interpolated instead of dragging the series to zero.
  raw[2][raw[2] == 0] <- NA
  series <- xts(raw[2], order.by = as.yearmon(raw$yearmon))
  na.approx(series)
}
## Load the six input series from Vertica
# Data 1: Weekly U.S. Ending Stocks of Crude Oil and Petroleum Products (Thousand Barrels)
crude_stock <- load_data(con, "crude_oil_and_petroleum")
# Data 2: Weekly U.S. Ending Stocks of Total Gasoline (Thousand Barrels)
gas_stock <- load_data(con, "total_gasoline")
# Data 3: Cushing, OK Crude Oil Future Contract 1 (Dollars per Barrel)
# Contract 1 reflects the "next delivery" price of crude oil. EIA's Contract 3
# price might give a better model, but we stick to the dataset we already have.
crude_price <- load_data(con, "crude_oil_future_contract")
# Data 4: US Regular Conventional Gasoline Retail Prices (Dollars per Gallon)
gas_price <- load_data(con, "us_regular_conventional_gasoline_price")
# Data 5: U.S. Field Production of Crude Oil (Thousand Barrels)
# Note: the typo ("curde") exists in the actual table name, so it must stay.
crude_prod <- load_data(con, "us_field_production_of_curde_oil")
# Data 6: U.S. Total Gasoline All Sales/Deliveries by Prime Supplier
gas_sales <- load_data(con, "total_gasoline_by_prime_supplier")
# All data loaded; close every open ODBC handle.
odbcCloseAll()
# Target variable: the gas price 3 months ahead. Shifting the index 3 months
# into the past aligns each date's row with the price observed 3 months later.
idx_3m <- index(gas_price) - 3/12 # each month in yearmon objects is 1/12
gas_price_3m <- xts(coredata(gas_price), order.by = idx_3m)
# Stock levels have a seasonal component that we remove to improve prediction:
# divide each series by the multiplicative seasonal factor estimated by
# stats::decompose().
crude_stock <-(xts(as.ts(crude_stock) / decompose(as.ts(crude_stock), type = 'mult')$seasonal,
                   order.by=index(crude_stock)))
gas_stock <- (xts(as.ts(gas_stock) / decompose(as.ts(gas_stock), type = 'mult')$seasonal,
                  order.by=index(gas_stock)))
# Merge everything into a single xts object, keep only complete rows, and name
# the columns.  The last column is the response; the individual source series
# are removed afterwards to save memory.
dset <- merge(crude_price, crude_prod, crude_stock, gas_sales, gas_stock, gas_price, gas_price_3m)
dset <- dset[complete.cases(dset)]
names(dset) <- c("crude_price", "crude_prod", "crude_stock", "gas_sales", "gas_stock", "gas_price", "gas_price_3m")
rm(crude_price, crude_prod, crude_stock, gas_sales, gas_stock, gas_price, gas_price_3m)
# Split into a training set (everything through Dec-2010) and a test set
# (Jan-2011 onward) using xts date-range subsetting.
dset.train <- as.data.frame(dset["/2010"])
dset.test <- as.data.frame(dset["2011/"])
# Number of parallel executors for the Distributed R model builders: read the
# instance count from distributedR_status().
nExecutor <- sum(distributedR_status()$Inst)
# Observed 3-months-ahead prices on the test split, used to score every model.
actuals <- dset.test$gas_price_3m
# Each build_* helper below trains a model, scores it on the test split, and
# returns list(model, predictions, rmse, time).  RMSE (root mean squared error
# of the predictions vs. actuals) is the figure of merit: lower is better.
# Fit and evaluate a random forest using Distributed R's parallelized
# hpdrandomForest().
#
# predictors: character vector of column names in dset.train / dset.test
#             to use as model inputs.
# Reads the globals dset.train, dset.test, actuals and nExecutor.
# Returns list(model, predictions, rmse, time), where time is the elapsed
# seconds for training plus scoring.
build_randomForest <- function(predictors) {
  time <- system.time({
    # Train the model on the training split
    X <- dset.train[predictors]
    Y <- dset.train$gas_price_3m
    my_model <- hpdrandomForest(x = X, y = Y, nExecutor = nExecutor, importance = TRUE)
    # Score the model on the held-out test split
    predicted_vals <- predict(my_model, dset.test[predictors])
    model_rmse <- rmse(actuals, predicted_vals)
  })
  list(model = my_model, predictions = predicted_vals, rmse = model_rmse, time = time[3])
}
# Generalized Linear Model via Distributed R's parallelized hpdglm().
# Trains a Gaussian (identity link) GLM on the training split and scores it
# on the test split.  Reads the globals dset.train, dset.test and actuals;
# returns list(model, predictions, rmse, time) like the other builders.
build_glm <- function(predictors) {
  elapsed <- system.time({
    # Distribute the training data as darrays.  With no block count given,
    # hpdglm stripes the data across the cluster by default.
    x_train <- as.darray(as.matrix(dset.train[predictors]))
    y_train <- as.darray(as.matrix(dset.train$gas_price_3m))
    fit <- hpdglm(responses = y_train, predictors = x_train, family = gaussian(link=identity))
    # Score on the held-out test split
    preds <- predict(fit, as.matrix(dset.test[predictors]))
    err <- rmse(actuals, preds)
  })
  list(model = fit, predictions = preds, rmse = err, time = elapsed[3])
}
# Support Vector Regression via e1071::svm().  "nu-regression" is used so
# the epsilon parameter is optimized through nu.  Unlike the other builders
# this runs single-node: HP has not implemented a distributed SVM, so the
# plain e1071 version is used.
# Reads the globals dset.train, dset.test and actuals; returns
# list(model, predictions, rmse, time) like the other builders.
build_svm <- function(predictors) {
  elapsed <- system.time({
    x_train <- as.matrix(dset.train[predictors])
    y_train <- as.matrix(dset.train$gas_price_3m)
    fit <- svm(x_train, y = y_train, type = 'nu-regression', kernel = 'linear')
    # Score on the held-out test split
    preds <- predict(fit, as.matrix(dset.test[predictors]))
    err <- rmse(actuals, preds)
  })
  list(model = fit, predictions = preds, rmse = err, time = elapsed[3])
}
# Model 0: Naïve "no-change" model. It assumes the gasoline price in 3 months will be equal to current price.
# The no-change model is usually used to control if our costlier models do improve over the simplest approach to forecasting.
r0 <- list()
r0$time <- system.time({
r0$predictions <- dset.test$gas_price
r0$rmse <- rmse(actuals, r0$predictions)
})[3]
# Model 1: RF model using all variables. The RF model is good for this kind of exploratory work since it can calculate
# the "importance" of each variable and let us take somewhat unimportant data out of the training set and maybe improve
# our models.
predictors_all <- names(dset.test)[names(dset.test) != "gas_price_3m"]
r1 <- build_randomForest(predictors_all)
importance <- r1$model$importance
print("Variable importance from Random Forest model with all variables:")
print(importance)
# Keep only the predictors judged most relevant (informed by the importance
# table printed above), then fit the remaining candidate models on that set.
predictors = c("crude_price", "crude_stock", "gas_price")
# Model 2: Random Forest restricted to the selected variables.
r2 <- build_randomForest(predictors)
# Model 3: Gaussian GLM on the selected variables.
r3 <- build_glm(predictors)
# Model 4: Support Vector Regression on the selected variables.
r4 <- build_svm(predictors)
# Collect RMSE and elapsed time of all five models into one comparison table.
# (data.frame() with default check.names turns "Execution Time" into the
# syntactic column name Execution.Time.)
RMSE <- c(r0$rmse, r1$rmse, r2$rmse, r3$rmse, r4$rmse)
EXEC_TIME <- c(r0$time, r1$time, r2$time, r3$time, r4$time)
models_summary <- data.frame("RMSE" = RMSE, "Execution Time" = EXEC_TIME)
row.names(models_summary) <- c("Naive", "Random Forest (all vars)", "Random Forest (key vars)", "GLM Gaussian", "SVR")
print("")
print("Comparison of models")
print(models_summary)
# The fitted models improve slightly over the naive approach (~0.35 RMSE
# against ~0.37). The Random Forest on key variables (r2) supplies the output.
test.output <- cbind(dset.test[predictors], prediction_gas_price_3m=r2$predictions, actual_gas_price_3m=actuals)
write.csv(test.output, file="output.csv")
print("Output written to 'output.csv' file in the current directory.") | /gas_prediction.r | permissive | cjalmeida/gas_challenge | R | false | false | 10,521 | r | ### Prepare the environment
# Force the session timezone to UTC so xts date-range subsetting behaves
# consistently regardless of the host's locale.
Sys.setenv(TZ='UTC')
# Load the required libraries.
library(stats)
library(xts)
library(vRODBC)         # Vertica ODBC connectivity (sqlQuery, odbcConnect)
library(distributedR)   # HP Distributed R runtime
library(Metrics)        # rmse()
library(HPdclassifier)  # hpdrandomForest()
library(HPdregression)  # hpdglm()
library(e1071)          # svm()
# Start Distributed R; swallow only the "already running" error so the
# script can be re-run inside a live session. Any other startup error
# is re-raised.
tryCatch(
{
distributedR_start()
}, error= function(e) {
if (!grepl('is already running', e$message)) stop(e)
}
)
# Open the Vertica connection configured as "VerticaGasDSN" in odbc.ini.
con <- odbcConnect("VerticaGasDSN")
# Build the SQL that pulls one table's monthly closing values from Vertica.
# Pushing the aggregation into Vertica keeps fine-grained (daily/weekly)
# rows out of R's memory entirely.
get_query <- function(table_name) {
  # Probe the table with a zero-row select to learn the data column's name
  # (always the second column, after the date).
  value_col <- names(sqlQuery(con, paste('select * from', table_name, 'limit 0')))[2]
  # Inner query: FIRST_VALUE over a DESC date ordering within each
  # (year, month) partition yields the month's *closing* value whether the
  # table is daily, weekly or monthly. (ASC would give the opening value.)
  inner_sql <- paste("select year(date) as y, month(date) as m,",
    "first_value(", value_col, ") over(partition by year(date), month(date) order by date desc) closing",
    "from", table_name)
  # Outer query: collapse to one row per (year, month) and express the date
  # in xts' "yearmon" encoding, i.e. year + (month - 1)/12.
  paste("select y + (m-1)/12 as yearmon, max(closing) as closing from",
    "(", inner_sql, ") as subq",
    "group by 1 order by 1")
}
# Pull a full table from Vertica as a monthly xts series.
# Zeros in the data column are treated as missing: they are flagged as NA
# and then filled by linear interpolation (na.approx).
load_data <- function(con, table_name) {
  raw <- sqlQuery(con, get_query(table_name))
  raw[[2]][raw[[2]] == 0] <- NA
  series <- xts(raw[2], order.by = as.yearmon(raw$yearmon))
  na.approx(series)
}
## Load every source table as a monthly xts series.
# Data 1: Weekly U.S. Ending Stocks of Crude Oil and Petroleum Products (Thousand Barrels)
crude_stock <- load_data(con, "crude_oil_and_petroleum")
# Data 2: Weekly U.S. Ending Stocks of Total Gasoline (Thousand Barrels)
gas_stock <- load_data(con, "total_gasoline")
# Data 3: Cushing, OK Crude Oil Future Contract 1 (Dollars per Barrel)
# Contract 1 reflects the "next delivery" price of crude oil. Contract 3
# from EIA might suit a 3-month horizon better, but we stick to this dataset.
crude_price <- load_data(con, "crude_oil_future_contract")
# Data 4: US Regular Conventional Gasoline Retail Prices (Dollars per Gallon)
gas_price <- load_data(con, "us_regular_conventional_gasoline_price")
# Data 5: U.S. Field Production of Crude Oil (Thousand Barrels)
# Note: the table name itself carries a typo ("curde") and must stay as-is.
crude_prod <- load_data(con, "us_field_production_of_curde_oil")
# Data 6: U.S. Total Gasoline All Sales/Deliveries by Prime Supplier
gas_sales <- load_data(con, "total_gasoline_by_prime_supplier")
# Done loading; release all ODBC handles.
odbcCloseAll()
# Response variable: the gas price 3 months ahead. Shifting the index back
# by 3/12 aligns each future price with the current month's row.
idx_3m <- index(gas_price) - 3/12 # each month in a yearmon index is 1/12
gas_price_3m <- xts(coredata(gas_price), order.by = idx_3m)
# Remove the seasonal component of the stock series by dividing out the
# multiplicative seasonal factors from decompose().
# NOTE(review): assumes as.ts() infers a 12-period frequency from the
# yearmon index so decompose() can estimate seasonality -- confirm on data.
crude_stock <-(xts(as.ts(crude_stock) / decompose(as.ts(crude_stock), type = 'mult')$seasonal,
order.by=index(crude_stock)))
gas_stock <- (xts(as.ts(gas_stock) / decompose(as.ts(gas_stock), type = 'mult')$seasonal,
order.by=index(gas_stock)))
# Merge everything into one series, keep only fully-observed rows, name the
# columns (the last one is the response), and drop the per-series objects.
dset <- merge(crude_price, crude_prod, crude_stock, gas_sales, gas_stock, gas_price, gas_price_3m)
dset <- dset[complete.cases(dset)]
names(dset) <- c("crude_price", "crude_prod", "crude_stock", "gas_sales", "gas_stock", "gas_price", "gas_price_3m")
rm(crude_price, crude_prod, crude_stock, gas_sales, gas_stock, gas_price, gas_price_3m)
# Split into training (through Dec 2010) and test (2011 onward) sets with
# xts date-range subsetting, then flatten to plain data frames.
# NOTE(review): an earlier comment here described detecting start dates via
# start(); the code simply splits on fixed years.
dset.train <- as.data.frame(dset["/2010"])
dset.test <- as.data.frame(dset["2011/"])
# Two Random Forest models will be fit (all variables vs. selected ones)
# using hpdrandomForest from HPdclassifier; despite the "classifier" label
# the function supports regression. Fit quality is judged by RMSE on the
# test set: the lower, the better.
# Size the executor pool from the number of Distributed R instances
# reported by distributedR_status().
nExecutor <- sum(distributedR_status()$Inst)
# Observed response on the test set; shared by every model's RMSE check.
actuals <- dset.test$gas_price_3m
# Each build_* helper below returns a list with $model (fitted model),
# $predictions (test-set scores), $rmse, and $time (elapsed seconds),
# so the different learners can be compared uniformly.
# Model builder: Distributed R's parallel Random Forest (HPdclassifier).
# Despite the package's "classifier" name, it performs regression here.
# Returns a list: $model, $predictions (test-set scores), $rmse, $time.
build_randomForest <- function (predictors) {
  time <- system.time({
    # Fit on the training set across nExecutor workers, tracking
    # variable importance for later inspection.
    features <- dset.train[predictors]
    response <- dset.train$gas_price_3m
    my_model <- hpdrandomForest(x = features, y = response, nExecutor = nExecutor, importance = T)
    # Score the held-out data and measure the error.
    predicted_vals <- predict(my_model, dset.test[predictors])
    model_rmse <- rmse(actuals, predicted_vals)
  })
  list("model" = my_model, "predictions" = predicted_vals, "rmse" = model_rmse, "time" = time[3])
}
# Model builder: Gaussian GLM via Distributed R's parallelized hpdglm.
# Returns a list: $model, $predictions (test-set scores), $rmse, $time.
build_glm <- function(predictors) {
  time <- system.time({
    # Distribute the training matrices as darrays. With no block count
    # supplied, hpdglm stripes the data across the cluster by default.
    pred_darray <- as.darray(as.matrix(dset.train[predictors]))
    resp_darray <- as.darray(as.matrix(dset.train$gas_price_3m))
    my_model <- hpdglm(responses = resp_darray, predictors = pred_darray, family = gaussian(link=identity))
    # Score the held-out data and measure the error.
    predicted_vals <- predict(my_model, as.matrix(dset.test[predictors]))
    model_rmse <- rmse(actuals, predicted_vals)
  })
  list("model" = my_model, "predictions" = predicted_vals, "rmse" = model_rmse, "time" = time[3])
}
# Model builder: Support Vector Regression on the selected predictors.
# NOTE(review): type = 'nu-regression' parameterizes the SVR via nu; epsilon
# is the tuned parameter of type = 'eps-regression' in e1071 -- confirm the
# intended variant. HP has not implemented a distributed SVM, so this uses
# the plain (single-node, in-memory) e1071 implementation.
#
# Args:
#   predictors: character vector of column names in dset.train/dset.test
#               used as model inputs.
# Returns a list: $model (fitted svm), $predictions (test-set scores),
# $rmse (error vs. the global `actuals`), $time (elapsed seconds).
build_svm <- function(predictors) {
  time <- system.time({
    # Train on the in-memory training set; no cluster is involved here.
    X <- as.matrix(dset.train[predictors])
    Y <- as.matrix(dset.train$gas_price_3m)
    # Name both x and y explicitly; the original relied on positional
    # matching of x after the named y argument.
    my_model <- svm(x = X, y = Y, type = 'nu-regression', kernel = 'linear')
    # Score the held-out test set and measure the error.
    predicted_vals <- predict(my_model, as.matrix(dset.test[predictors]))
    model_rmse <- rmse(actuals, predicted_vals)
  })
  return(list("model" = my_model, "predictions" = predicted_vals, "rmse" = model_rmse, "time" = time[3]))
}
# Model 0: Naive "no-change" benchmark -- the gasoline price in 3 months is
# assumed equal to the current price. This is the control that every costlier
# model below must beat to justify itself.
# The trailing [3] keeps only the "elapsed" wall-clock seconds from system.time().
r0 <- list()
r0$time <- system.time({
r0$predictions <- dset.test$gas_price
r0$rmse <- rmse(actuals, r0$predictions)
})[3]
# Model 1: Random Forest on every column except the response itself.
# Its variable-importance output guides which predictors to keep for the
# leaner models fitted below.
predictors_all <- names(dset.test)[names(dset.test) != "gas_price_3m"]
r1 <- build_randomForest(predictors_all)
importance <- r1$model$importance
print("Variable importance from Random Forest model with all variables:")
print(importance)
# Keep only the predictors judged most relevant (informed by the importance
# table printed above), then fit the remaining candidate models on that set.
predictors = c("crude_price", "crude_stock", "gas_price")
# Model 2: Random Forest restricted to the selected variables.
r2 <- build_randomForest(predictors)
# Model 3: Gaussian GLM on the selected variables.
r3 <- build_glm(predictors)
# Model 4: Support Vector Regression on the selected variables.
r4 <- build_svm(predictors)
# Collect RMSE and elapsed time of all five models into one comparison table.
RMSE <- c(r0$rmse, r1$rmse, r2$rmse, r3$rmse, r4$rmse)
EXEC_TIME <- c(r0$time, r1$time, r2$time, r3$time, r4$time)
models_summary <- data.frame("RMSE" = RMSE, "Execution Time" = EXEC_TIME)
row.names(models_summary) <- c("Naive", "Random Forest (all vars)", "Random Forest (key vars)", "GLM Gaussian", "SVR")
print("")
print("Comparison of models")
print(models_summary)
# The fitted models improve slightly over the naive approach (~0.35 RMSE
# against ~0.37). The Random Forest on key variables (r2) supplies the output.
test.output <- cbind(dset.test[predictors], prediction_gas_price_3m=r2$predictions, actual_gas_price_3m=actuals)
write.csv(test.output, file="output.csv")
print("Output written to 'output.csv' file in the current directory.") |
#########################################################################
#
# Auxiliary program to run all experiments in parallel
#
# Author: Marcos A. Domingues
# Date: September, 2016
#
#########################################################################
# Setup the input and output dir to run all experiments at the same time
# Run the full battery of experiments, one input dataset at a time.
#
# For each file found in `from`: copy it to <to>/dataset.csv (the fixed
# input name the experiment scripts expect), source the five experiment
# scripts, rename the `result` directory they populate after the dataset
# (file name without extension), then recreate an empty `result` directory
# and remove the temporary dataset.csv for the next iteration.
#
# Args:
#   from: directory containing the input dataset files.
#   to:   working directory holding dataset.csv and the result directory.
runExperiments <- function(from='/home/mad/Experimentos/mad/CARSlibrary/topics', to='/home/mad/Experimentos/mad/CARSlibrary'){
	# Fix: the result directory was hard-coded to the default `to`;
	# it now honors the `to` argument.
	result.dir <- file.path(to, 'result')
	files <- list.files(from)
	for(i in files){
		file.copy(file.path(from, i), file.path(to, 'dataset.csv'))
		source(file="daviBEST.R")
		source(file="filterPoF.R")
		source(file="ibcf.R")
		source(file="cReduction.R")
		source(file="weightPoF.R")
		# Dataset name without its extension, used to label this run's results.
		tmp <- unlist(strsplit(as.character(i), "\\."))[1]
		file.rename(result.dir, file.path(to, tmp))
		dir.create(result.dir)
		file.remove(file.path(to, 'dataset.csv'))
	}
}
runExperiments()
| /runExperiments.R | no_license | markchou/CARSlibrary | R | false | false | 1,123 | r | #########################################################################
#
# Auxiliary program to run all experiments in parallel
#
# Author: Marcos A. Domingues
# Date: September, 2016
#
#########################################################################
# Setup the input and output dir to run all experiments at the same time
# Run the full battery of experiments, one input dataset at a time.
#
# For each file found in `from`: copy it to <to>/dataset.csv (the fixed
# input name the experiment scripts expect), source the five experiment
# scripts, rename the `result` directory they populate after the dataset
# (file name without extension), then recreate an empty `result` directory
# and remove the temporary dataset.csv for the next iteration.
#
# Args:
#   from: directory containing the input dataset files.
#   to:   working directory holding dataset.csv and the result directory.
runExperiments <- function(from='/home/mad/Experimentos/mad/CARSlibrary/topics', to='/home/mad/Experimentos/mad/CARSlibrary'){
	# Fix: the result directory was hard-coded to the default `to`;
	# it now honors the `to` argument.
	result.dir <- file.path(to, 'result')
	files <- list.files(from)
	for(i in files){
		file.copy(file.path(from, i), file.path(to, 'dataset.csv'))
		source(file="daviBEST.R")
		source(file="filterPoF.R")
		source(file="ibcf.R")
		source(file="cReduction.R")
		source(file="weightPoF.R")
		# Dataset name without its extension, used to label this run's results.
		tmp <- unlist(strsplit(as.character(i), "\\."))[1]
		file.rename(result.dir, file.path(to, tmp))
		dir.create(result.dir)
		file.remove(file.path(to, 'dataset.csv'))
	}
}
runExperiments()
|
##################################################################################################
## ##
## BALD is an R-package. ##
## It is a Bayesian time series model of loss development. ##
## Features include skewed Student-t distribution with time-varying scale parameters, ##
## an expert prior for the calendar year effect, ##
## and accommodation for structural breaks in the consumption path of development years. ##
## It is an update for the older package lossDev as it has been stopped supported. ##
## ##
## Copyright (c) 2018 Frank A. Schmid, ##
## ##
## This file is part of BALD. ##
## ##
## lossDev is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <https://www.gnu.org/licenses/>. ##
## ##
##################################################################################################
##' @include zzz.R
##' @include NodeOutput.R
##' @include LossDevModelOutput.R
NULL
##' The base output class for all aggregate annual models.
##'
##' \code{AnnualAggLossDevModelOutput} is the base output class for all aggregate annual model objects.
##' Derived classes should contain all output from a \acronym{JAGS} run of the input object in the slot \dQuote{input}.
##' Currenly only the slot \dQuote{input} is allowed to be a non-model node. All other nodes should be the exact name of some settable node in the model.
##' This is because \code{getModelOutputNodes} currently looks at the slot names to determine what values to set; only slot \dQuote{input} is known to be a slot other than a settable node.
##' This class is derived from \code{LossDevModelOutput}
##' @name AnnualAggLossDevModelOutput-class
##' @docType class
##' @seealso \code{\linkS4class{LossDevModelOutput}}
# Virtual S4 class. Apart from the inherited "input" slot, every slot here
# is named for a settable JAGS node and holds that node's posterior output
# (getModelOutputNodes derives the node names from these slot names).
setClass(
'AnnualAggLossDevModelOutput',
representation(inc.pred='NodeOutput',
# eta / eta.mu: exposure-growth nodes (used by exposureGrowth()).
eta='NodeOutput',
eta.mu='NodeOutput',
sigma.eta='NodeOutput',
sigma.kappa='NodeOutput',
kappa.log.error='NodeOutput',
rho='NodeOutput',
rho.eta='NodeOutput',
# h / beta / df / mu.upper.left: scale, skew, degrees-of-freedom and
# location nodes (used by triResi() to form residuals).
h='NodeOutput',
sigma.h.2.log.innov='NodeOutput',
beta='NodeOutput',
df='NodeOutput',
k='NodeOutput',
mu.upper.left='NodeOutput',
a.ou='NodeOutput',
b.ou='NodeOutput',
stoch.log.inf.pred='NodeOutput',
kappa='NodeOutput',
delta.tail='NodeOutput',
#omega.obs='NodeOutput',
'VIRTUAL'),
contains='LossDevModelOutput')
##' A generic function to plot and/or return the posterior predicted exposure growth (corresponding to \emph{eta} in the model). See \code{vignette('BALD')}.
##'
##' @name exposureGrowth
##' @param object The object from which to plot and/or return the posterior predicted exposure growth.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting the exposure growth. Also returns a named numeric vector for the median of the posterior for the exposure growth on the real (not log) scale. Returned invisibly.
##' @seealso \code{\link[=exposureGrowth,AnnualAggLossDevModelOutput-method]{exposureGrowth("AnnualAggLossDevModelOutput")}}
##' \code{\link{exposureGrowthTracePlot}}
##' @exportMethod exposureGrowth
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(
##' as.integer(dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' exposureGrowthTracePlot(standard.model.output)
##' }
# Register the S4 generic; the AnnualAggLossDevModelOutput method follows below.
setGenericVerif('exposureGrowth',
                function(object, plot=TRUE)
                standardGeneric('exposureGrowth'))
##' A method to plot and/or return the posterior predicted exposure growth (corresponding to \emph{eta} in the model).
##'
##' @name exposureGrowth,AnnualAggLossDevModelOutput-method
##' @param object The object from which to plot and/or return the posterior predicted exposure growth.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting the exposure growth. Also returns a named numeric vector for the median of the posterior for the exposure growth on the real (not log) scale. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{exposureGrowth}}
##' \code{\link{exposureGrowthTracePlot}}
setMethod('exposureGrowth',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plot)
      {
          # K = number of rows/columns of the (square) observed triangle.
          K <- getTriDim(object@input)[1]
          # Posterior medians of eta. The first element is dropped,
          # presumably because no growth rate exists for the first
          # exposure year; the matching year is dropped below too.
          eta <- object@eta@median[-1]
          obs.years <- object@input@exposureYears[-1]
          # Years beyond the triangle: whatever remains of eta after the
          # K-1 observed years, labelled consecutively past the last year.
          pred.years <- 1:(length(eta) - (K-1)) + max(obs.years)
          eta.obs <- eta[1:length(obs.years)]
          eta.pred <- eta[1:length(pred.years) + length(obs.years)]
          # Return value: growth rates named by year, observed then predicted.
          ans <- c(eta.obs, eta.pred)
          names(ans) <- c(obs.years, pred.years)
          if(plot)
          {
              f.plot <- function()
              {
                  # Empty frame spanning all years and the full growth range.
                  plot(
                       x=range(obs.years, pred.years),
                       y=range(eta),
                       xlab=getExposureYearLabel(object@input),
                       ylab="Rate of Exposure Growth (Net of Calendar Year Effect)",
                       type='n',
                       cex.axis=1.25,
                       cex.lab=1.25)
                  # Observed growth: solid line with open circles.
                  lines(
                        x=obs.years,
                        y=eta.obs,
                        type='o',
                        lty=1,
                        pch=1,
                        lwd=1)
                  # Forecast growth: dotted line with filled circles.
                  lines(
                        x=pred.years,
                        y=eta.pred,
                        type='o',
                        lty=3,
                        pch=20,
                        lwd=2)
                  # Stationary mean of the growth process, back-transformed
                  # from the log scale (exp(eta.mu) - 1).
                  abline(h=median(exp(slot(object@eta.mu, 'value')) - 1),
                         col='black',
                         lwd=2,
                         lty=2)
              }
              f.legend <- function()
              {
                  legend('center',
                         c('Rate of Exposure Growth','Future Rate of Growth','Stationary Mean'),
                         col=c('black','black','black'),
                         lwd=c(1,2,2),
                         pch=c(1,20,NA),
                         lty=c(1,3,2),
                         horiz=TRUE,
                         bty='n',
                         xpd=NA)
              }
              plot.top.bottom(f.plot, f.legend)
          }
          return(invisible(ans))
      })
##' A generic function to plot and/or return the difference between final actual and predicted cumulative payments. See \code{vignette('BALD')}.
##'
##' @name finalCumulativeDiff
##' @param object The object from which to plot and/or return the difference.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @param expYearRange Either a range of years (for example c(1995, 2006)) or one of the keywords \dQuote{all} or \dQuote{fullyObs}.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=finalCumulativeDiff,AnnualAggLossDevModelOutput-method]{finalCumulativeDiff("AnnualAggLossDevModelOutput")}}
##' @exportMethod finalCumulativeDiff
##' @examples
##' rm(list=ls())
##' library(BALD)
##' options(device.ask.default=FALSE)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' finalCumulativeDiff(standard.model.output)
##' }
# Register the S4 generic; the AnnualAggLossDevModelOutput method follows below.
setGenericVerif('finalCumulativeDiff',
                function(object, plot=TRUE, expYearRange='all')
                standardGeneric('finalCumulativeDiff'))
##' A method to plot and/or return the difference between final actual and predicted cumulative payments.
##'
##' The relative difference (x/y - 1) between the final observed cumulative payment and the corresponding predicted cumulative payment is plotted for each exposure year.
##' The horizontal lines of each box represent (starting from the top) the 90th, 75th, 50th, 20th, and 10th percentiles. Exposure years in which all cumulative payments are \code{NA} are omitted.
##'
##' If \code{expYearRange} is \dQuote{fullyObs}, then only exposure years with a non missing value in the first column will be plotted.
##'
##' @name finalCumulativeDiff,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOuput} from which to plot and/or return the difference between final actual and predicted cumulative payments.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @param expYearRange Either a range of years (for example c(1995, 2006)) or one of the keywords \dQuote{all} or \dQuote{fullyObs}.
##' @return Mainly called for the side effect of plotting the difference between final actual and predicted cumulative payments by exposure year. Also returns a named array for the percentiles in the plot. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{finalCumulativeDiff}}
setMethod('finalCumulativeDiff',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plot, expYearRange)
      {
          # K = triangle dimension; inc.pred.coda holds the posterior draws
          # of predicted incrementals for the observed (upper-left) K-by-K
          # part of the triangle.
          K <- getTriDim(object@input)[1]
          inc.pred.coda <- slot(object@inc.pred, 'value')[1:K, 1:K,,]
          cumulatives <- object@input@cumulatives
          exp.years <- object@input@exposureYears
          # Resolve expYearRange: the keywords 'all'/'fullyObs' become an
          # actual year range; a numeric input must be a 2-element integer
          # range within the observed exposure years.
          if(is.character(expYearRange))
          {
              if(length(expYearRange) != 1)
                  stop('"expYearRange" must be of length one if it is a character')
              if(expYearRange != 'all' && expYearRange != 'fullyObs')
                  stop('"expYearRange" must be one of "all" or "fullyObs" if it is supplied as a character')
              if(expYearRange == 'all')
                  expYearRange <- range(exp.years)
              else
                  expYearRange <- range(exp.years[which(!is.na(cumulatives[,1]))])
          } else {
              if(!all(as.integer(expYearRange) == expYearRange))
                  stop('"expYearRange" must be supplied as an integer')
              if(length(expYearRange) != 2)
                  stop('"expYearRange" must have length 2')
              if(max(exp.years) < max(expYearRange) || min(exp.years) > min(expYearRange))
                  stop('"expYearRange" must be a subset of the actual exposure years')
          }
          # One column per exposure year; rows are the plotted percentiles.
          cumulative.resi.stats <- array(NA, c(5, K), dimnames=list(c('10%', '25%', '50%', '75%', '90%'), exp.years))
          for(i in 1:K)
          {
              # Columns of row i with an observed cumulative value.
              tmp <- which(!is.na(cumulatives[i,]))
              if(length(tmp) == 0)
              {
                  # Fully unobserved row: leave the stats as NA.
                  cumulative.resi.stats[,i] <- NA
                  next
              }else{
                  last.obs.cumulative.column <- max(tmp)
                  # Relative difference x/y - 1 between the last observed
                  # cumulative and the predicted cumulative (sum of predicted
                  # incrementals) for every posterior draw. The single-cell
                  # case is handled separately because indexing with one
                  # column drops a dimension, changing apply's margins.
                  if(length(tmp) == 1 && tmp[1] == 1)
                      diff <- cumulatives[i,last.obs.cumulative.column] /apply(inc.pred.coda[i,1,,], c(1,2), sum) - 1
                  else
                      diff <- cumulatives[i,last.obs.cumulative.column] / apply(inc.pred.coda[i,1:last.obs.cumulative.column,,], c(2,3), sum) - 1
                  # (Local "diff" shadows base::diff only inside this loop.)
                  stats <- quantile(diff, c(.1, .25, .5, .75, .9))
                  cumulative.resi.stats[names(stats),i] <- stats
              }
          }
          if(plot)
          {
              expYearRange.seq <- seq(expYearRange[1], expYearRange[2])
              # Empty frame; x padded by one year on each side so the boxes fit.
              plot(
                   x=range(exp.years) + c(-1, +1),
                   y=range(as.vector(cumulative.resi.stats[,as.character(expYearRange.seq) ]), na.rm=TRUE),
                   type='n',
                   xlab=getExposureYearLabel(object@input),
                   ylab="Relative Difference Between Actual and Estimated Cumulatives",
                   cex.axis=1.25,
                   cex.lab=1.25)
              abline(h=0,col='gray23',lwd=2,lty='dashed')
              # Draw one box-style glyph per year: a thick median bar, two
              # wide boxes for the 25%-75% band, and two narrower boxes out
              # to the 10% and 90% percentiles.
              for(i in seq_along(expYearRange.seq))
              {
                  year.i <- expYearRange.seq[i]
                  i. <- match(year.i, object@input@exposureYears)
                  ##draw median to make it thick
                  off.set <- .45
                  lines(x=c(year.i-off.set, year.i+off.set),
                        y=rep(cumulative.resi.stats['50%',i.],2),
                        lwd=2)
                  ##upper 25%
                  off.set <- .45
                  upper.lower <- c('75%','50%')
                  lines(x=c(year.i-off.set, year.i+off.set, year.i+off.set, year.i-off.set, year.i-off.set),
                        y=cumulative.resi.stats[upper.lower[c(1,1,2,2,1)],i.])
                  ##lower 25%
                  off.set <- .45
                  upper.lower <- c('50%','25%')
                  lines(x=c(year.i-off.set, year.i+off.set, year.i+off.set, year.i-off.set, year.i-off.set),
                        y=cumulative.resi.stats[upper.lower[c(1,1,2,2,1)],i.])
                  ##lower%
                  off.set <- .25
                  upper.lower <- c('25%','10%')
                  lines(x=c(year.i-off.set, year.i+off.set, year.i+off.set, year.i-off.set, year.i-off.set),
                        y=cumulative.resi.stats[upper.lower[c(1,1,2,2,1)],i.])
                  ##upper%
                  off.set <- .25
                  upper.lower <- c('90%','75%')
                  lines(x=c(year.i-off.set, year.i+off.set, year.i+off.set, year.i-off.set, year.i-off.set),
                        y=cumulative.resi.stats[upper.lower[c(1,1,2,2,1)],i.])
              }
          }
          return(invisible(cumulative.resi.stats))
      }
          )
##' A generic function to plot and/or return residuals for models in the \pkg{BALD} package. See \code{vignette('BALD')}.
##'
##' @name triResi
##' @param object The object from which to plot and/or return the residuals.
##' @param standardize A logical value. If \code{TRUE}, the plotted and returned residuals are normalized to their respective standard deviation.
##' @param timeAxis A character value describing along which of the three time axes to plot the residuals: \sQuote{dy} for development year time, \sQuote{cy} for calendar year time, \sQuote{ey} for exposure year time.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=triResi,AnnualAggLossDevModelOutput-method]{triResi("AnnualAggLossDevModelOutput")}}
##' \code{\link{QQPlot}}
##' @exportMethod triResi
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' #residual plot by development year
##' triResi(standard.model.output, timeAxis='dy')
##' #residual plot by exposure year
##' triResi(standard.model.output, timeAxis='ey')
##' #residual plot by calendar year
##' triResi(standard.model.output, timeAxis='cy')
##' }
# Register the S4 generic; the AnnualAggLossDevModelOutput method follows below.
setGenericVerif('triResi',
                function(object, timeAxis=c('dy', 'cy', 'ey'), standardize=TRUE, plot=TRUE)
                standardGeneric('triResi'))
##' A method to plot and/or return residuals for models in the \pkg{BALD} package.
##'
##' Because the model is Bayesian, each residual comes as a distribution. To ease graphical interpretation, only the median for each residual is plotted/returned.
##' The residual is defined as the observed value minus the posterior mean; if standardized, it is also divided by its posterior standard deviation.
##'
##' @name triResi,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOuput} from which to plot and/or return the residuals.
##' @param timeAxis A character value describing along which of the three (3) time axis to plot the residuals. \sQuote{dy} for development year time, \sQuote{cy} for calendar year time, \sQuote{ey} for exposure year time.
##' @param standardize A logical value. If \code{TRUE}, the plotted and returned residuals are normalized to their respective standard deviation.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with the same structure as the input triangle. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{triResi}}
##' \code{\link{QQPlot}}
setMethod('triResi',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, timeAxis, standardize, plot)
      {
          # Validate/resolve the requested time axis ('dy', 'cy' or 'ey').
          timeAxis <- match.arg(timeAxis)
          K <- getTriDim(object@input)[1]
          # Observed log incremental payments; non-positive incrementals
          # cannot be logged and are treated as missing.
          log.inc <- object@input@incrementals
          log.inc[log.inc <= 0] <- NA
          log.inc <- log(log.inc)
          # Posterior draws of the model nodes entering the residual:
          # location (mu), skew (beta), degrees of freedom (v), scale (h).
          mu <- slot(object@mu.upper.left, 'value')
          beta <- slot(object@beta, 'value')[1,,]
          v <- slot(object@df, 'value')[1,,]
          h <- slot(object@h, 'value')
          # Residual matrix: rows = exposure years, cols = development years.
          resi <- array(NA, c(K, K), list(object@input@exposureYears, NULL))
          v.factor <- v / (v - 2)
          h.squared <- h ^ 2
          if(standardize)
          {
              # Standardized residual: (observed - posterior mean) divided by
              # the posterior standard deviation; the median over all MCMC
              # draws is kept for each cell.
              h.to.the.forth <- h.squared ^ 2
              var.second.factor <- 2 * beta ^ 2 *v.factor ^ 2 / (v - 4)
              for(i in 1:K)
                  for(j in 1:K)
                  {
                      if(is.na(log.inc[i,j]))
                          next
                      resi[i,j] <- median((log.inc[i,j] -
                                           (mu[i,j,,] + beta * h.squared[j,,] * v.factor)) /
                                          sqrt(h.squared[j,,] * v.factor + h.to.the.forth[j,,] * var.second.factor))
                  }
          } else {
              # Raw residual: observed minus posterior mean (median over draws).
              for(i in 1:K)
                  for(j in 1:K)
                  {
                      if(is.na(log.inc[i,j]))
                          next
                      resi[i,j] <- median(log.inc[i,j] -
                                          (mu[i,j,,] + beta * h.squared[j,,] * v.factor))
                  }
          }
          # Each branch below defines f.plot to scatter the residuals along
          # the chosen axis and overlay the per-position median in red.
          if(identical(timeAxis, 'dy'))
          {
              f.plot <- function()
              {
                  plot(x=c(1,K),
                       y=range(resi, na.rm=TRUE),
                       ylab=ifelse(standardize, 'Standardized Residuals', 'Residuals'),
                       xlab='Development Year',
                       type="n",
                       cex.axis=1.25,
                       cex.lab=1.25)
                  abline(a=0,b=0,lwd=2,col='black',lty='dashed')
                  for(i in 1:K)
                      points(x=rep(i,K),
                             y=resi[,i])
                  points(x=1:K, y=apply(resi, 2, median, na.rm=TRUE), lwd=3, type="h", col='red') #bars
                  points(x=1:K, y=apply(resi, 2, median, na.rm=TRUE), pch=20, type="p", col='red') #pinheads
              }
          } else if(identical(timeAxis, 'ey')) {
              exp.years <- object@input@exposureYears
              f.plot <- function()
              {
                  plot(x=range(exp.years),
                       y=range(resi, na.rm=TRUE),
                       ylab=ifelse(standardize, 'Standardized Residuals', 'Residuals'),
                       xlab=getExposureYearLabel(object@input),
                       type="n",
                       cex.axis=1.25,
                       cex.lab=1.25)
                  abline(a=0,b=0,lwd=2,col='black',lty='dashed')
                  for(i in 1:K)
                      points(x=rep(exp.years[i],K),
                             y=resi[i,])
                  points(x=exp.years, y=apply(resi, 1, median, na.rm=TRUE), lwd=3, type="h", col='red') #bars
                  points(x=exp.years, y=apply(resi, 1, median, na.rm=TRUE), pch=20, type="p", col='red') #pinheads
              }
          } else if(identical(timeAxis, 'cy')) {
              # Calendar-year axis labels reuse the exposure years.
              # NOTE(review): this assumes annual, contiguous exposure years
              # so diagonal k shares the label of exposure year k -- confirm.
              cal.years <- object@input@exposureYears
              f.plot <- function()
              {
                  plot(x=range(cal.years),
                       y=range(resi, na.rm=TRUE),
                       ylab=ifelse(standardize, 'Standardized Residuals', 'Residuals'),
                       xlab='Calendar Year',
                       type="n",
                       cex.axis=1.25,
                       cex.lab=1.25)
                  abline(a=0,b=0,lwd=2,col='black',lty='dashed')
                  # i/j enumerate all (row, col) cells in column-major order,
                  # matching resi's internal layout; cells with i+j-1 == k lie
                  # on the k-th anti-diagonal, i.e. the k-th calendar year.
                  i <- rep(1:K, K)
                  j <- rep(1:K, rep(K, K))
                  for(k in 1:K)
                  {
                      sub <- resi[i+j-1 == k]
                      l <- length(sub)
                      points(x=rep(cal.years[k],l),
                             y=sub)
                      points(x=cal.years[k],
                             y=median(sub, na.rm=TRUE),
                             lwd=3,
                             type='h',
                             col='red')
                      points(x=cal.years[k],
                             y=median(sub, na.rm=TRUE),
                             lwd=3,
                             type='p',
                             col='red')
                  }
              }
          }
          f.legend <- function()
          {
              legend('center',
                     'Median of Residuals',
                     col=c('red'),
                     pch=c(20),
                     horiz=TRUE,
                     bty='n',
                     xpd=NA)
          }
          if(plot)
              plot.top.bottom(f.plot, f.legend)
          return(invisible(resi))
      })
##' A generic function to plot a Q-Q plot for models in the \pkg{BALD} package.
##'
##' This function plots sorted observed log incremental payments vs sorted predicted log incremental payments.
##' Credible intervals are also plotted. See \code{vignette('BALD')}.
##'
##' @name QQPlot
##' @param object The object from which to plot the values.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=QQPlot,AnnualAggLossDevModelOutput-method]{QQPlot("AnnualAggLossDevModelOutput")}}
##' \code{\link{triResi}}
##' @exportMethod QQPlot
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' QQPlot(standard.model.output)
##' }
## Register the Q-Q plot generic; class-specific implementations are supplied
## via setMethod.
setGenericVerif("QQPlot", function(object) {
    standardGeneric("QQPlot")
})
##' A method to plot a Q-Q plot for models in the \pkg{BALD} package.
##'
##' This function plots sorted observed log incremental payments vs sorted predicted log incremental payments.
##' Credible intervals are also plotted.
##'
##' @name QQPlot,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot the values.
##' @return NULL. Called for the side effect of plotting.
##' @docType methods
##' @seealso \code{\link{QQPlot}}
##' \code{\link{triResi}}
## Q-Q plot of sorted observed incremental payments against the posterior of
## the sorted predicted incrementals, on log-log axes, with a 90% credible
## band and a 45-degree reference line.
setMethod('QQPlot',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object)
      {
          ##plot obs log.inc vs log.inc.pred
          ##by first sorting for every draw the log.inc.pred and then taking quantiles over the sorted values
          f.plot <- function()
          {
              K <- getTriDim(object@input)[1]
              inc.obs <- object@input@incrementals
              ## Predicted incrementals restricted to the observed K x K triangle;
              ## trailing dims index the posterior draws.
              inc.pred <- slot(object@inc.pred, 'value')[1:K, 1:K, , ]
              ## log.inc.obs is used only to build the mask of usable cells:
              ## non-positive observations cannot be logged and are excluded.
              log.inc.obs <- inc.obs
              log.inc.obs[log.inc.obs <= 0] <- NA
              log.inc.obs <- log(log.inc.obs)
              log.inc.obs.not.na <- !is.na(log.inc.obs)
              ## Sorted observed values (original scale; axes are log-scaled below).
              obs.s <- sort(as.vector(inc.obs[log.inc.obs.not.na]))
              ## For each posterior draw, sort the predicted values over the same
              ## cells; rows of pred.s are order statistics, columns are draws.
              pred.s <- apply(inc.pred, c(3,4), function(x) sort(as.vector(x[log.inc.obs.not.na])))
              ## 5%, 50% and 95% posterior quantiles of each order statistic,
              ## i.e. the median and a 90 percent credible interval.
              pred.s.q <- apply(pred.s, 1, quantile, c(0.05, 0.5, 0.95))
              plot(x=range(obs.s),
                   xlab='Sorted Observed Incrementals (Log Scale)',
                   y=range(pred.s.q),
                   ylab='Sorted Predicted Incrementals (Log Scale)',
                   type='n',
                   cex.axis=1.25,
                   cex.lab=1.25,
                   log='xy')
              ## Lower credible bound.
              lines(x=obs.s,
                    y=pred.s.q[1,])
              ## Posterior median.
              points(x=obs.s,
                     y=pred.s.q[2,],
                     cex=1.3)
              ## Upper credible bound.
              lines(x=obs.s,
                    y=pred.s.q[3,])
              ## Perfect-fit reference line.
              abline(a=0,b=1,col='red',lty=2)
          }
          f.legend <- function()
          {
              legend('center',
                     c('Median', '90 Percent\nCredible Intervals', '45 Degree Line'),
                     col=c('black', 'black', 'red'),
                     lty=c(NA, 1, 2),
                     pch=c(1, NA, NA),
                     horiz=TRUE,
                     bty='n',
                     xpd=NA)
          }
          plot.top.bottom(f.plot, f.legend)
      })
##' A generic function to plot and/or return the posterior of the skewness parameter for models in \pkg{BALD}.
##'
##' The skewness parameter does not directly correspond to the degree of skewness. However, all else being equal, a larger (in magnitude) skewness parameter indicates a higher degree of skewness,
##' and a skewness parameter of zero equates to zero skew.
##' See \code{vignette('BALD')}.
##'
##' @references
##' Kim, Y., and J. McCulloch (2007) \dQuote{The Skew-Student Distribution with Application to U.S. Stock Market Returns and the Equity Premium,} Department of Economics, Ohio State University, October 2007
##'
##' @name skewnessParameter
##' @param object The object from which to plot and/or return the skewness parameter.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=skewnessParameter,AnnualAggLossDevModelOutput-method]{skewnessParameter("AnnualAggLossDevModelOutput")}}
##' @exportMethod skewnessParameter
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' skewnessParameter(standard.model.output)
##' }
## Register the skewness-parameter generic; class-specific implementations are
## supplied via setMethod.
setGenericVerif("skewnessParameter",
                function(object, plotDensity = TRUE, plotTrace = TRUE) {
                    standardGeneric("skewnessParameter")
                })
##' A method to plot and/or return the posterior of the skewness parameter for models in \pkg{BALD}.
##'
##' The skewness parameter does not directly correspond to the degree of skewness. However, all else being equal, a larger (in magnitude) skewness parameter indicates a higher degree of skewness,
##' and a skewness parameter of zero equates to zero skew.
##'
##' @references
##' Kim, Y., and J. McCulloch (2007) \dQuote{The Skew-Student Distribution with Application to U.S. Stock Market Returns and the Equity Premium,} Department of Economics, Ohio State University, October 2007
##'
##' @name skewnessParameter,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the skewness parameter.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. But also returns a named array with some select quantiles of the posterior for the skewness parameter. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{skewnessParameter}}
##' @importFrom stats integrate
## Plot and/or return the posterior of the skewness parameter (beta) together
## with its truncated-t prior density.  Returns NULL invisibly when the model
## was not estimated with a skewed-t.
setMethod('skewnessParameter',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          if(!object@input@allowForSkew)
          {
              warning('Cannot call "skewnessParameter" unless the model was estimated with a skewed-t. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }
          ## Prior hyperparameters as supplied to JAGS.
          jd <- getJagsData(object@input)
          precision.for.skewness <- jd$precision.for.skewness
          df.for.skewness <- jd$df.for.skewness
          ## Prior is centered at zero.
          mu <- 0
          ## d.un: untruncated Student-t density in the precision
          ## parameterization, written out explicitly.
          d.un <- function(x)
          {
              gamma((df.for.skewness+1)/2) / gamma(df.for.skewness / 2) * (precision.for.skewness/ df.for.skewness / pi) ^ 0.5 * (1 + precision.for.skewness / df.for.skewness * (x - mu)^2) ^ (-(df.for.skewness + 1) / 2)
          }
          ## CDF mass below the lower and upper truncation bounds.
          l <- integrate (d.un, lower = -Inf, upper = jd$bounds.for.skewness[1])$value
          u <- integrate (d.un, lower = -Inf, upper = jd$bounds.for.skewness[2])$value
          ## d: the truncated prior density, renormalized by the mass (u - l)
          ## inside the bounds; zero outside them.
          ## NOTE(review): d is scalar-only (scalar if/||) — presumably
          ## plot.density.and.or.trace evaluates it pointwise; confirm.
          d <- function(x)
          {
              if(x < jd$bounds.for.skewness[1] || x > jd$bounds.for.skewness[2])
                  return(0)
              return(d.un(x) / (u - l))
          }
          ans <- plot.density.and.or.trace(coda=slot(object@beta, 'value')[1,,],
                                           plotDensity = plotDensity ,
                                           plotTrace = plotTrace,
                                           d.prior=d,
                                           nice.parameter.name='Skewness Parameter',
                                           zero.line=TRUE,
                                           lower.bound=jd$bounds.for.skewness[1],
                                           upper.bound=jd$bounds.for.skewness[2])
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior of the autoregressive parameter for models in \pkg{BALD}.
##' See \code{vignette('BALD')}.
##'
##'
##' @name autoregressiveParameter
##' @param object The object from which to plot and/or return the autoregressive parameter.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=autoregressiveParameter,AnnualAggLossDevModelOutput-method]{autoregressiveParameter("AnnualAggLossDevModelOutput")}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
##' @exportMethod autoregressiveParameter
##' @examples
##' rm(list=ls())
##' library(BALD)
##' data(CumulativeAutoBodilyInjuryTriangle)
##' CumulativeAutoBodilyInjuryTriangle <- as.matrix(CumulativeAutoBodilyInjuryTriangle)
##' sample.col <- (dim(CumulativeAutoBodilyInjuryTriangle)[2] - 6:0)
##' data(HPCE)
##' HPCE <- as.matrix(HPCE)[,1]
##' HPCE.rate <- HPCE[-1] / HPCE[-length(HPCE)] - 1
##' print(HPCE.rate[(-10):0 + length(HPCE.rate)])
##' HPCE.years <- as.integer(names(HPCE.rate))
##' max.exp.year <- max(as.integer(
##' dimnames(CumulativeAutoBodilyInjuryTriangle)[[1]]))
##' years.to.keep <- HPCE.years <= max.exp.year + 3
##' HPCE.rate <- HPCE.rate[years.to.keep]
##' break.model.input.w.ar1 <- makeBreakAnnualInput(
##' cumulative.payments = CumulativeAutoBodilyInjuryTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = HPCE.rate,
##' first.year.in.new.regime = c(1986, 1987),
##' prior.for.first.year.in.new.regime=c(2,1),
##' exp.year.type = 'ay',
##' extra.dev.years = 5,
##' use.skew.t = TRUE,
##' bound.for.skewness.parameter=5,
##' use.ar1.in.calendar.year = TRUE)
##' \dontrun{
##' break.model.output.w.ar1 <- runLossDevModel(
##' break.model.input.w.ar1,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' calendarYearEffectAutoregressiveParameter(break.model.output.w.ar1)
##' }
## Deprecated generic, retained for backward compatibility.  Use
## calendarYearEffectAutoregressiveParameter() instead.
setGenericVerif("autoregressiveParameter",
                function(object, plotDensity = TRUE, plotTrace = TRUE) {
                    .Deprecated("calendarYearEffectAutoregressiveParameter")
                    standardGeneric("autoregressiveParameter")
                })
##' A method to plot and/or return the posterior of the autoregressive parameter for models in \pkg{BALD}.
##'
##' @name autoregressiveParameter,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the autoregressive parameter.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the autoregressive parameter. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{autoregressiveParameter}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
## Deprecated alias: simply forwards to the renamed implementation so existing
## callers keep working.
setMethod("autoregressiveParameter",
          signature(object = "AnnualAggLossDevModelOutput"),
          function(object, plotDensity, plotTrace) {
              calendarYearEffectAutoregressiveParameter(object, plotDensity, plotTrace)
          })
##' A generic function to plot and/or return the posterior of the autoregressive parameter for the calendar year effect for models in \pkg{BALD}.
##' See \code{vignette('BALD')}.
##'
##'
##' @name calendarYearEffectAutoregressiveParameter
##' @param object The object from which to plot and/or return the autoregressive parameter which is associated with the calendar year effect.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=calendarYearEffectAutoregressiveParameter,AnnualAggLossDevModelOutput-method]{calendarYearEffectAutoregressiveParameter("AnnualAggLossDevModelOutput")}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
##' @exportMethod calendarYearEffectAutoregressiveParameter
##' @examples
##' rm(list=ls())
##' library(BALD)
##' data(CumulativeAutoBodilyInjuryTriangle)
##' CumulativeAutoBodilyInjuryTriangle <- as.matrix(CumulativeAutoBodilyInjuryTriangle)
##' sample.col <- (dim(CumulativeAutoBodilyInjuryTriangle)[2] - 6:0)
##' print(decumulate(CumulativeAutoBodilyInjuryTriangle)[1:7, sample.col])
##' data(HPCE)
##' HPCE <- as.matrix(HPCE)[,1]
##' HPCE.rate <- HPCE[-1] / HPCE[-length(HPCE)] - 1
##' print(HPCE.rate[(-10):0 + length(HPCE.rate)])
##' HPCE.years <- as.integer(names(HPCE.rate))
##' max.exp.year <- max(as.integer(dimnames(CumulativeAutoBodilyInjuryTriangle)[[1]]))
##' years.to.keep <- HPCE.years <= max.exp.year + 3
##' HPCE.rate <- HPCE.rate[years.to.keep]
##' break.model.input.w.ar1 <- makeBreakAnnualInput(
##' cumulative.payments = CumulativeAutoBodilyInjuryTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = HPCE.rate,
##' first.year.in.new.regime = c(1986, 1987),
##' prior.for.first.year.in.new.regime=c(2,1),
##' exp.year.type = 'ay',
##' extra.dev.years = 5,
##' use.skew.t = TRUE,
##' bound.for.skewness.parameter=5,
##' use.ar1.in.calendar.year = TRUE)
##' \dontrun{
##' break.model.output.w.ar1 <- runLossDevModel(
##' break.model.input.w.ar1,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' calendarYearEffectAutoregressiveParameter(break.model.output.w.ar1)
##' }
## Register the calendar-year-effect AR-parameter generic; class-specific
## implementations are supplied via setMethod.
setGenericVerif("calendarYearEffectAutoregressiveParameter",
                function(object, plotDensity = TRUE, plotTrace = TRUE) {
                    standardGeneric("calendarYearEffectAutoregressiveParameter")
                })
##' A method to plot and/or return the posterior of the autoregressive parameter for the calendar year effect for models in \pkg{BALD}.
##'
##' @name calendarYearEffectAutoregressiveParameter,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the autoregressive parameter which is associated with the calendar year effect.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the autoregressive parameter. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{calendarYearEffectAutoregressiveParameter}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
## Plot and/or return the posterior of the AR(1) parameter (rho) in the
## calendar year effect, overlaid with its Beta prior on (0, 1).  Returns the
## quantile summary invisibly, or NULL invisibly when the model was not
## estimated with an AR(1) term in the calendar year effect.
setMethod('calendarYearEffectAutoregressiveParameter',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## The rho node only exists when the AR(1) option was enabled.
          if(!object@input@ar1InCalendarYearEffect)
          {
              ## Fixed grammar: "a autoregressive" -> "an autoregressive".
              warning('Cannot call "calendarYearEffectAutoregressiveParameter" unless the model was estimated with an autoregressive error term in the calendar year effect. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }
          ## Prior for rho is Beta(rho.prior[1], rho.prior[2]), as supplied to JAGS.
          jd <- getJagsData(object@input)
          ans <- plot.density.and.or.trace(coda=slot(object@rho, 'value')[1,,],
                                           plotDensity=plotDensity,
                                           plotTrace=plotTrace,
                                           d.prior=function(x) dbeta(x, jd$rho.prior[1], jd$rho.prior[2]),
                                           nice.parameter.name='Calendar Year AR Parameter',
                                           zero.line=FALSE,
                                           lower.bound=0,
                                           upper.bound=1)
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior of the autoregressive parameter for the exposure growth for models in \pkg{BALD}.
##' See \code{vignette('BALD')}.
##'
##'
##' @name exposureGrowthAutoregressiveParameter
##' @param object The object from which to plot and/or return the autoregressive parameter which is associated with exposure growth.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=exposureGrowthAutoregressiveParameter,AnnualAggLossDevModelOutput-method]{exposureGrowthAutoregressiveParameter("AnnualAggLossDevModelOutput")}}
##' @exportMethod exposureGrowthAutoregressiveParameter
##' @examples
##' rm(list=ls())
##' library(BALD)
##' data(CumulativeAutoBodilyInjuryTriangle)
##' CumulativeAutoBodilyInjuryTriangle <- as.matrix(CumulativeAutoBodilyInjuryTriangle)
##' sample.col <- (dim(CumulativeAutoBodilyInjuryTriangle)[2] - 6:0)
##' print(decumulate(CumulativeAutoBodilyInjuryTriangle)[1:7, sample.col])
##' data(HPCE)
##' HPCE <- as.matrix(HPCE)[,1]
##' HPCE.rate <- HPCE[-1] / HPCE[-length(HPCE)] - 1
##' print(HPCE.rate[(-10):0 + length(HPCE.rate)])
##' HPCE.years <- as.integer(names(HPCE.rate))
##' max.exp.year <- max(as.integer(dimnames(CumulativeAutoBodilyInjuryTriangle)[[1]]))
##' years.to.keep <- HPCE.years <= max.exp.year + 3
##' HPCE.rate <- HPCE.rate[years.to.keep]
##' break.model.input.w.ar1 <- makeBreakAnnualInput(
##' cumulative.payments = CumulativeAutoBodilyInjuryTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = HPCE.rate,
##' first.year.in.new.regime = c(1986, 1987),
##' prior.for.first.year.in.new.regime=c(2,1),
##' exp.year.type = 'ay',
##' extra.dev.years = 5,
##' use.skew.t = TRUE,
##' bound.for.skewness.parameter=5,
##' use.ar1.in.exposure.growth = TRUE)
##' \dontrun{
##' break.model.output.w.ar1 <- runLossDevModel(
##' break.model.input.w.ar1,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' exposureGrowthAutoregressiveParameter(break.model.output.w.ar1)
##' }
## Register the exposure-growth AR-parameter generic; class-specific
## implementations are supplied via setMethod.
setGenericVerif("exposureGrowthAutoregressiveParameter",
                function(object, plotDensity = TRUE, plotTrace = TRUE) {
                    standardGeneric("exposureGrowthAutoregressiveParameter")
                })
##' A method to plot and/or return the posterior of the autoregressive parameter for the exposure growth for models in \pkg{BALD}.
##'
##' @name exposureGrowthAutoregressiveParameter,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the autoregressive parameter which is associated with exposure growth.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the autoregressive parameter. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{exposureGrowthAutoregressiveParameter}}
## Plot and/or return the posterior of the AR(1) parameter (rho.eta) in the
## exposure growth, overlaid with its Beta prior on (0, 1).  Returns the
## quantile summary invisibly, or NULL invisibly when the model was not
## estimated with an AR(1) term in the exposure growth.
setMethod('exposureGrowthAutoregressiveParameter',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## The rho.eta node only exists when the AR(1) option was enabled.
          if(!object@input@ar1InExposureGrowth)
          {
              ## Fixed copy-paste defect: the original message referred to the
              ## calendar year effect; this method guards on exposure growth.
              ## Also fixed grammar: "a autoregressive" -> "an autoregressive".
              warning('Cannot call "exposureGrowthAutoregressiveParameter" unless the model was estimated with an autoregressive error term in the exposure growth. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }
          ## Prior for rho.eta is Beta(rho.eta.prior[1], rho.eta.prior[2]),
          ## as supplied to JAGS.
          jd <- getJagsData(object@input)
          ans <- plot.density.and.or.trace(coda=slot(object@rho.eta, 'value')[1,,],
                                           plotDensity=plotDensity,
                                           plotTrace=plotTrace,
                                           d.prior=function(x) dbeta(x, jd$rho.eta.prior[1], jd$rho.eta.prior[2]),
                                           nice.parameter.name='Exposure Growth AR Parameter',
                                           zero.line=FALSE,
                                           lower.bound=0,
                                           upper.bound=1)
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior of the mean exposure growth for models in \pkg{BALD}.
##'
##' (Optionally) exposure growth is modeled as an ar1 process. This inherently assumes that periods of high exposure growth are (or at least have the possibility of being) followed by continued high periods.
##' See \code{vignette('BALD')}.
##'
##' @name meanExposureGrowth
##' @param object The object from which to plot and/or return the mean exposure growth.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=meanExposureGrowth,AnnualAggLossDevModelOutput-method]{meanExposureGrowth("AnnualAggLossDevModelOutput")}}
##' @exportMethod meanExposureGrowth
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' meanExposureGrowth(standard.model.output)
##' }
## Register the mean-exposure-growth generic; class-specific implementations
## are supplied via setMethod.
setGenericVerif("meanExposureGrowth",
                function(object, plotDensity = TRUE, plotTrace = TRUE) {
                    standardGeneric("meanExposureGrowth")
                })
##' A method to plot and/or return the posterior of the mean exposure growth for models in \pkg{BALD}.
##'
##'
##' @name meanExposureGrowth,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the mean exposure growth.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the mean exposure growth. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{meanExposureGrowth}}
## Plot and/or return the posterior of the mean exposure growth (eta.mu),
## overlaid with its Normal prior.  Returns the quantile summary invisibly.
setMethod('meanExposureGrowth',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace) {
              ## Prior for eta.mu is Normal(0, 1/precision.for.eta.mu),
              ## as supplied to JAGS.
              prior.precision <- getJagsData(object@input)$precision.for.eta.mu
              prior.density <- function(x) {
                  dnorm(x, 0, sqrt(1 / prior.precision))
              }
              stats <- plot.density.and.or.trace(coda = slot(object@eta.mu, 'value')[1, , ],
                                                 plotDensity = plotDensity,
                                                 plotTrace = plotTrace,
                                                 d.prior = prior.density,
                                                 nice.parameter.name = 'Mean Exposure Growth',
                                                 zero.line = TRUE)
              invisible(stats)
          })
##' A generic function to plot and/or return the posterior of the degrees of freedom for the Student-\eqn{t} in models in \pkg{BALD}.
##'
##' When there is zero skew, the degrees of freedom are the degrees of freedom for the non-skewed \eqn{t}.
##' See \code{vignette('BALD')}.
##'
##' @references
##' Kim, Y., and J. McCulloch (2007) \dQuote{The Skew-Student Distribution with Application to U.S. Stock Market Returns and the Equity Premium,} Department of Economics, Ohio State University, October 2007.
##'
##' @name degreesOfFreedom
##' @param object The object from which to plot and/or return the degrees of freedom.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=degreesOfFreedom,AnnualAggLossDevModelOutput-method]{degreesOfFreedom("AnnualAggLossDevModelOutput")}}
##' @exportMethod degreesOfFreedom
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' degreesOfFreedom(standard.model.output)
##' }
## Register the degrees-of-freedom generic; class-specific implementations are
## supplied via setMethod.
setGenericVerif("degreesOfFreedom",
                function(object, plotDensity = TRUE, plotTrace = TRUE) {
                    standardGeneric("degreesOfFreedom")
                })
##' A method to plot and/or return the posterior of the degrees of freedom for the Student-\eqn{t} in models in \pkg{BALD}.
##'
##'
##' @name degreesOfFreedom,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the degrees of freedom.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with some select quantiles of the posterior for the degrees of freedom. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{degreesOfFreedom}}
## Plot and/or return the posterior of the Student-t degrees of freedom,
## overlaid with its truncated chi-square prior.  Returns the quantile summary
## invisibly.
setMethod('degreesOfFreedom',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace) {
              ## Prior is chi-square with df.k degrees of freedom, truncated to
              ## [df.bounds[1], df.bounds[2]], as supplied to JAGS.
              jd <- getJagsData(object@input)
              lower <- jd$df.bounds[1]
              upper <- jd$df.bounds[2]
              ## Probability mass of the untruncated prior inside the bounds;
              ## used to renormalize the truncated density.
              mass.in.bounds <- pchisq(upper, jd$df.k) - pchisq(lower, jd$df.k)
              prior.density <- function(x) {
                  dchisq(x, df = jd$df.k) / mass.in.bounds
              }
              stats <- plot.density.and.or.trace(coda = slot(object@df, 'value')[1, , ],
                                                 plotDensity = plotDensity,
                                                 plotTrace = plotTrace,
                                                 d.prior = prior.density,
                                                 nice.parameter.name = 'Degrees of Freedom',
                                                 zero.line = FALSE,
                                                 lower.bound = lower,
                                                 upper.bound = upper)
              invisible(stats)
          })
##' A generic function to plot and/or return the posterior of the standard deviation of the exposure growth rate for models in \pkg{BALD}.
##' See \code{vignette('BALD')}.
##'
##' @name standardDeviationOfExposureGrowth
##' @param object The object from which to plot and/or return the standard deviation of the exposure growth rate.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=standardDeviationOfExposureGrowth,AnnualAggLossDevModelOutput-method]{standardDeviationOfExposureGrowth("AnnualAggLossDevModelOutput")}}
##' @exportMethod standardDeviationOfExposureGrowth
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' standardDeviationOfExposureGrowth(standard.model.output)
##' }
setGenericVerif('standardDeviationOfExposureGrowth',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
            {
                standardGeneric('standardDeviationOfExposureGrowth')
            })
##' A method to plot and/or return the posterior of the standard deviation of the exposure growth rate for models in \pkg{BALD}.
##'
##' @name standardDeviationOfExposureGrowth,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the standard deviation of the exposure growth rate.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the standard deviation of exposure growth. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{standardDeviationOfExposureGrowth}}
##' \code{\link{exposureGrowth}}
##' \code{\link{meanExposureGrowth}}
setMethod('standardDeviationOfExposureGrowth',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## The prior on sigma.eta is uniform over sigma.eta.bounds,
          ## which are stored in the JAGS input list.
          jags.data <- getJagsData(object@input)
          uniform.prior <- function(x)
              dunif(x, jags.data$sigma.eta.bounds[1], jags.data$sigma.eta.bounds[2])

          posterior.stats <- plot.density.and.or.trace(coda=object@sigma.eta@value[1,,],
                                                       plotDensity=plotDensity,
                                                       plotTrace=plotTrace,
                                                       d.prior=uniform.prior,
                                                       nice.parameter.name='Exposure Growth Standard Deviation',
                                                       zero.line=FALSE,
                                                       lower.bound=jags.data$sigma.eta.bounds[1])

          invisible(posterior.stats)
      })
##' A generic function to plot and/or return the posterior of the standard deviation of the calendar year effect for models in \pkg{BALD}.
##' See \code{vignette('BALD')}.
##'
##' @name standardDeviationOfCalendarYearEffect
##' @param object The object from which to plot and/or return the standard deviation of the calendar year effect.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @exportMethod standardDeviationOfCalendarYearEffect
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' standardDeviationOfCalendarYearEffect(standard.model.output)
##' }
##'
##' @seealso \code{\link[=standardDeviationOfCalendarYearEffect,AnnualAggLossDevModelOutput-method]{standardDeviationOfCalendarYearEffect("AnnualAggLossDevModelOutput")}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
##' \code{\link{autoregressiveParameter}}
setGenericVerif('standardDeviationOfCalendarYearEffect',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
            {
                standardGeneric('standardDeviationOfCalendarYearEffect')
            })
##' A method to plot and/or return the posterior of the standard deviation of the calendar year effect for models in \pkg{BALD}.
##'
##' @name standardDeviationOfCalendarYearEffect,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the standard deviation of the calendar year effect.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the standard deviation of the calendar year effect. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
##' \code{\link{autoregressiveParameter}}
setMethod('standardDeviationOfCalendarYearEffect',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## The prior on sigma.kappa is uniform over sigma.kappa.bounds,
          ## which are stored in the JAGS input list.
          jags.data <- getJagsData(object@input)
          uniform.prior <- function(x)
              dunif(x, jags.data$sigma.kappa.bounds[1], jags.data$sigma.kappa.bounds[2])

          posterior.stats <- plot.density.and.or.trace(coda=object@sigma.kappa@value[1,,],
                                                       plotDensity=plotDensity,
                                                       plotTrace=plotTrace,
                                                       d.prior=uniform.prior,
                                                       nice.parameter.name='Calendar Effect Standard Deviation',
                                                       zero.line=FALSE,
                                                       lower.bound=jags.data$sigma.kappa.bounds[1])

          invisible(posterior.stats)
      })
##' A generic function to plot and/or return the posterior of the standard deviation for the innovation in the scale parameter for models in \pkg{BALD}.
##'
##' Changes in the scale parameter (see \code{\link{scaleParameter}}) are assumed to follow a second-order random walk on the log scale.
##' This function plots the posterior standard deviation for this random walk.
##' See \code{vignette('BALD')}.
##'
##' @name standardDeviationForScaleInnovation
##' @param object The object from which to plot and/or return the standard deviation for the innovation in the log of the scale parameter.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=standardDeviationForScaleInnovation,AnnualAggLossDevModelOutput-method]{standardDeviationForScaleInnovation("AnnualAggLossDevModelOutput")}}
##' \code{\link{standardDeviationVsDevelopmentTime}}
##' @exportMethod standardDeviationForScaleInnovation
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' standardDeviationForScaleInnovation(standard.model.output)
##' }
setGenericVerif('standardDeviationForScaleInnovation',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
            {
                standardGeneric('standardDeviationForScaleInnovation')
            })
##' A method to plot and/or return the posterior of the standard deviation for the innovation in the scale parameter for models in \pkg{BALD}.
##'
##' @name standardDeviationForScaleInnovation,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the standard deviation for the innovation in the log of the scale parameter.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with some select quantiles of the posterior for the standard deviation in question. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{standardDeviationForScaleInnovation}}
##' \code{\link{scaleParameter}}
##' \code{\link{standardDeviationVsDevelopmentTime}}
setMethod('standardDeviationForScaleInnovation',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## The innovation standard deviation only exists when the scale
          ## parameter was allowed to vary over at least three columns;
          ## otherwise nothing was estimated and there is nothing to plot.
          if(object@input@noChangeInScaleParameterAfterColumn <= 2)
          {
              warning('Cannot call "standardDeviationForScaleInnovation" unless the model was estimated with at least three columns with different scales. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }
          ## Note: the prior is not drawn (draw.prior=FALSE), so the JAGS data
          ## list is not needed; the original (dead) call to getJagsData() here
          ## has been removed.
          ans <- plot.density.and.or.trace(coda=slot(object@sigma.h.2.log.innov, 'value')[1,,],
                                           plotDensity = plotDensity,
                                           plotTrace = plotTrace,
                                           draw.prior = FALSE,
                                           nice.parameter.name='Scale Innovation Standard Deviation',
                                           zero.line=FALSE,
                                           lower.bound=0)
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior of the scale parameter for the Student-\eqn{t} measurement equation for models in \pkg{BALD}.
##'
##' As the degrees of freedom of the \eqn{t} goes to infinity, the scale parameter is the standard deviation of the resulting normal distribution (assuming zero skew).
##' See \code{vignette('BALD')}.
##'
##' @name scaleParameter
##' @param object The object from which to plot and/or return the scale parameter.
##' @param column The scale parameter is allowed to vary with development time. Setting \code{column} results in the plotting and returning of the scale parameter corresponding to that column. Default value is \code{1}.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=scaleParameter,AnnualAggLossDevModelOutput-method]{scaleParameter("AnnualAggLossDevModelOutput")}}
##' @exportMethod scaleParameter
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' scaleParameter(standard.model.output)
##' }
setGenericVerif('scaleParameter',
                function(object, column=1, plotDensity=TRUE, plotTrace=TRUE)
                standardGeneric('scaleParameter'))
##' A method to plot and/or return the posterior of the scale parameter for the Student-\eqn{t} measurement equation for models in \pkg{BALD}.
##'
##' @name scaleParameter,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the scale parameter.
##' @param column The scale parameter is allowed to vary with development time. Setting \code{column} results in the plotting and returning of the scale parameter corresponding to that column. Default value is \code{1}.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the scale parameter. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{scaleParameter}}
setMethod('scaleParameter',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, column, plotDensity, plotTrace)
      {
          ## Validate the requested development-time column.
          if(!is.numeric(column))
              stop('"column" must be numeric')
          if(!identical(length(column), as.integer(1)))
              stop('"column" must be of length 1')
          ## NOTE(review): getTriDim(...)[1] is the triangle's first dimension,
          ## while the message refers to the number of columns -- confirm [1]
          ## vs [2] (the two agree for square triangles).
          if(column < 1 || column > getTriDim(object@input)[1])
              stop('"column" must be greater than 0 and less than the number of columns in the supplied incremental triangle.')
          ## draw.prior=FALSE: no prior curve is drawn, so the JAGS data list
          ## (formerly fetched here and never used) is not needed.
          ans <- plot.density.and.or.trace(coda=slot(object@h, 'value')[column,,],
                                           plotDensity = plotDensity,
                                           plotTrace = plotTrace,
                                           draw.prior=FALSE,
                                           nice.parameter.name=paste('Scale Parameter:', column),
                                           zero.line=FALSE,
                                           lower.bound=0)
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior of the stochastic inflation rho parameter for models in \pkg{BALD}.
##'
##' If the model incorporates a stochastic rate of inflation, then that rate is assumed to follow (on the log scale) an autoregressive process of order 1.
##' (The autoregressive process of order 1 is the discrete equivalent to an Ornstein-Uhlenbeck process.)
##' This function plots the posterior for the \eqn{rho} parameter, assuming one was estimated.
##' See \code{vignette('BALD')}.
##'
##' @name stochasticInflationRhoParameter
##' @param object The object from which to plot and/or return the stochastic inflation \eqn{rho} parameter.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=stochasticInflationRhoParameter,AnnualAggLossDevModelOutput-method]{stochasticInflationRhoParameter("AnnualAggLossDevModelOutput")}}
##' \code{\link{stochasticInflationStationaryMean}}
##' \code{\link{stochasticInflation}}
##' @exportMethod stochasticInflationRhoParameter
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' stochasticInflationRhoParameter(standard.model.output)
##' }
setGenericVerif('stochasticInflationRhoParameter',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
                standardGeneric('stochasticInflationRhoParameter'))
##' A method to plot and/or return the posterior of the stochastic inflation \eqn{rho} parameter for models in \pkg{BALD}.
##'
##' @name stochasticInflationRhoParameter,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the stochastic inflation \eqn{rho} parameter.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the \eqn{rho} parameter. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{stochasticInflationRhoParameter}}
##' \code{\link{stochasticInflationStationaryMean}}
##' \code{\link{stochasticInflation}}
setMethod('stochasticInflationRhoParameter',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## rho only exists when a stochastic inflation series was supplied
          ## and its persistence was not fixed by the user.
          no.stoch.rate <- identical(object@input@stochInflationRate, 0)
          rho.is.known <- !is.na(object@input@knownStochInflationPersistence)
          if(no.stoch.rate || rho.is.known)
          {
              warning('Cannot call "stochasticInflationRhoParameter" unless 1) there is a stochastic rate of inflation and 2) the rho is not known. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }

          ## Prior: a beta density on [0, 1] with parameters a.ou.prior.
          jags.data <- getJagsData(object@input)
          beta.prior <- function(x)
              dbeta(x, jags.data$a.ou.prior[1], jags.data$a.ou.prior[2])

          posterior.stats <- plot.density.and.or.trace(coda=object@a.ou@value[1,,],
                                                       plotDensity=plotDensity,
                                                       plotTrace=plotTrace,
                                                       d.prior=beta.prior,
                                                       nice.parameter.name='Inflation Autoregressive Parameter',
                                                       zero.line=FALSE,
                                                       lower.bound=0,
                                                       upper.bound=1)

          invisible(posterior.stats)
      })
##' A generic function to plot and/or return the posterior of the stochastic inflation stationary mean for models in \pkg{BALD}.
##'
##' If the model incorporates a stochastic rate of inflation, then that rate is assumed to follow (on the log scale) an autoregressive process of order 1.
##' (The autoregressive process of order 1 is the discrete equivalent to an Ornstein-Uhlenbeck process.)
##' This function plots the posterior for the stationary mean (on the log scale), assuming such a mean was estimated.
##' See \code{vignette('BALD')}.
##'
##' @name stochasticInflationStationaryMean
##' @param object The object from which to plot and/or return the stochastic inflation stationary mean.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=stochasticInflationStationaryMean,AnnualAggLossDevModelOutput-method]{stochasticInflationStationaryMean("AnnualAggLossDevModelOutput")}}
##' \code{\link{stochasticInflationRhoParameter}}
##' \code{\link{stochasticInflation}}
##' @exportMethod stochasticInflationStationaryMean
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' stochasticInflationStationaryMean(standard.model.output)
##' }
setGenericVerif('stochasticInflationStationaryMean',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
                standardGeneric('stochasticInflationStationaryMean'))
##' A method to plot and/or return the posterior of the stochastic inflation stationary mean for models in \pkg{BALD}.
##'
##' @name stochasticInflationStationaryMean,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the stochastic inflation stationary mean.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the stochastic inflation stationary mean. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{stochasticInflationStationaryMean}}
##' \code{\link{stochasticInflationRhoParameter}}
##' \code{\link{stochasticInflation}}
setMethod('stochasticInflationStationaryMean',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## The stationary mean only exists when a stochastic inflation series
          ## was supplied and the mean was not fixed by the user.
          if(identical(object@input@stochInflationRate, 0) || !is.na(object@input@knownStochInflationMean))
          {
              warning('Cannot call "stochasticInflationStationaryMean" unless 1) there is a stochastic rate of inflation and 2) the mean is not known. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }

          ## AR(1) on the log scale: the stationary mean of the process is
          ## b / (1 - a), computed draw by draw from the posterior samples.
          stationary.mean.draws <- object@b.ou@value[1,,] / (1 - object@a.ou@value[1,,])

          posterior.stats <- plot.density.and.or.trace(coda=stationary.mean.draws,
                                                       plotDensity=plotDensity,
                                                       plotTrace=plotTrace,
                                                       draw.prior=FALSE,
                                                       nice.parameter.name='(Log) Inflation Stationary Mean',
                                                       zero.line=TRUE)

          invisible(posterior.stats)
      })
##' A generic function to plot and/or return predicted and forecast stochastic inflation rates for models in \pkg{BALD}.
##'
##' If the model incorporates a stochastic rate of inflation, then that rate is assumed to follow (on the log scale) an autoregressive process of order 1.
##' (The autoregressive process of order 1 is the discrete equivalent to an Ornstein-Uhlenbeck process.)
##' This function plots the median of the posterior predictive distribution for stochastic inflation (not on the log scale) rates by year.
##' Values are returned prior to the application of any limits or weights.
##' Note that for years where observed values are supplied, the model takes those values at face value.
##' See \code{vignette('BALD')}.
##'
##' @name stochasticInflation
##' @param object The object from which to plot and/or return the stochastic inflation rates.
##' @param extraYears An integer expressing the (maximum) number of years to plot (beyond the final observed year). Must be at least zero. Default is 15.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=stochasticInflation,AnnualAggLossDevModelOutput-method]{stochasticInflation("AnnualAggLossDevModelOutput")}}
##' \code{\link{stochasticInflationRhoParameter}}
##' \code{\link{stochasticInflationStationaryMean}}
##' @exportMethod stochasticInflation
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' stochasticInflation(standard.model.output)
##' }
setGenericVerif('stochasticInflation',
                function(object, extraYears=15, plot=TRUE)
            {
                ## Validate here so every method of the generic inherits the checks.
                if(!is.numeric(extraYears))
                    stop('"extraYears" must be numeric')
                if(length(extraYears) != 1L)
                    stop('"extraYears" must be of length 1')
                if(extraYears < 0)
                    stop('"extraYears" must be at least zero.')
                standardGeneric('stochasticInflation')
            })
##' A method to plot and/or return predicted and forecast stochastic inflation rates for models in \pkg{BALD}.
##'
##' @name stochasticInflation,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return predicted and forecast stochastic inflation rates.
##' @param extraYears An integer expressing the (maximum) number of years to plot (beyond the final observed year). Must be at least zero.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array of the median predicted inflation rate (not on the log scale). Returned invisibly.
##' @docType methods
##' @seealso \code{\link{stochasticInflation}}
##' \code{\link{stochasticInflationRhoParameter}}
##' \code{\link{stochasticInflationStationaryMean}}
setMethod('stochasticInflation',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, extraYears, plot)
      {
          ## If the model was estimated without a stochastic inflation series
          ## (rate slot identical to 0), there is nothing to plot or return.
          if(identical(object@input@stochInflationRate,0))
          {
              warning('Cannot call "stochasticInflation" unless there is a stochastic rate of inflation. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }
          #don't have to check extraYears because the generic does that for us
          #the median holds up under log and exp so we don't have to calculate this draw by draw
          simulated.inflation.rate <- exp(object@stoch.log.inf.pred@median) - 1
          ## The 5%/95% credible bounds do NOT commute with exp, so they are
          ## computed over the raw posterior draws, cell by cell.
          simulated.inflation.rate.ci <- apply(exp(slot(object@stoch.log.inf.pred, 'value')) - 1, 1, quantile, c(0.05, 0.95))
          total.simulated.years <- length(simulated.inflation.rate)
          ## Label the simulated series by calendar year, anchored at the first
          ## year of the observed stochastic-inflation series.
          simulated.years <- min(object@input@stochInflationYears) - 1 + 1:total.simulated.years
          names(simulated.inflation.rate) <- simulated.years
          observed.years <- object@input@stochInflationYears
          total.observed.years <- length(observed.years)
          observed.inflation.rate <- object@input@stochInflationRate
          ## Number of forecast years to plot beyond the observed range,
          ## capped by both "extraYears" and the simulated years available.
          if(total.observed.years >= total.simulated.years)
              extra.years <- 0
          else
              extra.years <- min(extraYears,
                                 total.simulated.years - total.observed.years)
          ## Stationary mean of the inflation process: posterior median when it
          ## was estimated, otherwise the known (user-supplied) mean.
          ## suppressWarnings: the helper warns when the mean is known, but
          ## that case is handled explicitly via the NULL return just below.
          suppressWarnings( stat.mean <- stochasticInflationStationaryMean(object, plotDensity=FALSE, plotTrace=FALSE))
          if(is.null(stat.mean))
          {
              stat.mean <- object@input@knownStochInflationMean
          } else {
              stat.mean <- stat.mean['50%']
          }
          ## Main plot: observed rate (thick gray), fitted/predicted median
          ## (black), forecast credible bounds (dashed gray), stationary mean
          ## (dotted gray horizontal line).
          f.plot <- function()
          {
              plot(x=range(observed.years) + c(0, extra.years),
                   y=range(observed.inflation.rate, simulated.inflation.rate[1:(total.observed.years + extra.years)], stat.mean),
                   xlab="Calendar Year",
                   ylab="Rate of Inflation (Actual and Predicted)",
                   type='n',
                   cex.axis=1.25,
                   cex.lab=1.25)
              abline(h=stat.mean,
                     lwd=2,
                     col='gray',
                     lty=3)
              lines(
                    x=observed.years,
                    y=observed.inflation.rate,
                    lwd=3,
                    col='gray')
              lines(
                    x=observed.years,
                    y=simulated.inflation.rate[1:total.observed.years],
                    lwd=2,
                    col='black')
              ## Forecast segment plus credible interval (only when there are
              ## years to forecast beyond the observed range).
              if(extra.years > 0 )
              {
                  lines(
                        x=max(observed.years) + 1:extra.years,
                        y=simulated.inflation.rate[total.observed.years + 1:extra.years],
                        lwd=2,
                        col='black',
                        lty=1)
                  for(ind in c('5%', '95%'))
                  {
                      lines(
                            x=max(observed.years) + 1:extra.years,
                            y=simulated.inflation.rate.ci[ind, total.observed.years + 1:extra.years],
                            lwd=2,
                            col='gray',
                            lty=2)
                  }
              }
          }
          ## Legend drawn in a separate panel; entries depend on whether a
          ## forecast segment was plotted.
          f.legend <- function()
          {
              if(extra.years == 0)
                  legend('center',
                         c('Actual','Predicted', 'Stationary\nMean'),
                         col = c('gray','black', 'gray'),
                         lwd=c(3,2,2),
                         lty=c(1,1,3),
                         horiz=TRUE,
                         xpd=NA,
                         bty='n')
              else
                  legend('center',
                         c('Actual', 'Predicted/\nForecast', '90 Percent\nCredible Interval', 'Stationary\nMean'),
                         col = c('gray', 'black', 'gray', 'gray'),
                         lwd=c(3, 2, 2, 2),
                         lty=c(1, 1, 2, 3),
                         horiz=TRUE,
                         xpd=NA,
                         bty='n')
          }
          if(plot)
              plot.top.bottom(f.plot, f.legend)
          ## Named (by calendar year) median predicted inflation rates.
          return(invisible(simulated.inflation.rate))
      })
##' A generic function to plot and/or return predicted and forecast calendar year effect errors for models in \pkg{BALD}.
##'
##' The calendar year effect is comprised of two components: 1) a prior expected value which may be unique to every cell (subject to weights and bounds) and 2) a diagonal-specific error term.
##' This function only plots and returns the error term, which includes an autoregressive component if the model is estimated with such a feature.
##' See \code{vignette('BALD')}.
##'
##' @name calendarYearEffectErrors
##' @param object The object from which to plot and/or return the calendar year effect errors.
##' @param extraYears An integer expressing the (maximum) number of years to plot (beyond the final observed calendar year). Must be greater than or equal to zero. Default is 15.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=calendarYearEffectErrors,AnnualAggLossDevModelOutput-method]{calendarYearEffectErrors("AnnualAggLossDevModelOutput")}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{autoregressiveParameter}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffectErrorTracePlot}}
##' @exportMethod calendarYearEffectErrors
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' calendarYearEffectErrors(standard.model.output)
##' }
setGenericVerif('calendarYearEffectErrors',
                function(object, extraYears=15, plot=TRUE)
            {
                ## Validate "extraYears" once at the generic so every method
                ## can rely on it: a single, non-negative numeric value.
                if(!is.numeric(extraYears))
                    stop('"extraYears" must be numeric')
                if(length(extraYears) != 1L)
                    stop('"extraYears" must be of length 1')
                if(extraYears < 0)
                    stop('"extraYears" must be at least zero.')
                standardGeneric('calendarYearEffectErrors')
            })
##' A method to plot and/or return predicted and forecast calendar year effect errors for models in \pkg{BALD}.
##'
##' @name calendarYearEffectErrors,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOuput} from which to plot and/or return the calendar year effect errors.
##' @param extraYears An integer expressing the (maximum) number of years to plot (beyond the final observed calendar year). Must be greater than or equal to zero.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with the median predicted errors (not on the log scale). Returned invisibly.
##' @docType methods
##' @seealso \code{\link{calendarYearEffectErrors}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{autoregressiveParameter}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffectErrorTracePlot}}
setMethod('calendarYearEffectErrors',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, extraYears, plot)
      {
          ##don't have to check extraYears because the generic does that for us
          ##the median holds up under log and exp so we don't have to calculate this draw by draw
          kappa.error <- exp(object@kappa.log.error@median[-(1:2)]) - 1 #first value is for diagonal not in the triangle, second is for the first diagonal in the triangle which has no identifiable effect
          total.years <- length(kappa.error)
          ## Calendar-year labels: errors start one year after the first
          ## exposure year (the first diagonal has no identifiable effect).
          years <- min(object@input@exposureYears) + 1 - 1 + 1:total.years
          names(kappa.error) <- years
          ## Observed years excluding the first (no error for first diagonal).
          observed.years <- object@input@exposureYears[-1]
          total.observed.years <- length(observed.years)
          ## Number of forecast years to plot beyond the observed range,
          ## capped by both "extraYears" and the years available.
          if(total.observed.years >= total.years)
              extra.years <- 0
          else
              extra.years <- min(extraYears,
                                 total.years - total.observed.years)
          ## Main plot: estimated errors in black, forecast errors in gray,
          ## with a dashed zero line for reference.
          f.plot <- function()
          {
              plot(x=range(observed.years) + c(0, extra.years),
                   y=range(kappa.error[1:(total.observed.years + extra.years)]),
                   xlab="Calendar Year",
                   ylab="Calendar Effect Error",
                   type='n',
                   cex.axis=1.25,
                   cex.lab=1.25)
              abline(h=0,
                     lty=2)
              lines(
                    x=observed.years,
                    y=kappa.error[1:total.observed.years],
                    lwd=2,
                    col='black')
              if(extra.years > 0 )
                  lines(
                        x=max(observed.years) + 1:extra.years,
                        y=kappa.error[total.observed.years + 1:extra.years],
                        lwd=2,
                        col='gray')
          }
          ## Legend drawn in a separate panel; entries depend on whether a
          ## forecast segment was plotted.
          f.legend <- function()
          {
              if(extra.years == 0)
                  legend('center',
                         c('Estimated'),
                         col = c('black'),
                         lwd=c(2),
                         horiz=TRUE,
                         xpd=NA,
                         bty='n')
              else
                  legend('center',
                         c('Estimated', 'Predicted'),
                         col = c('black', 'gray'),
                         lwd=c(2, 2),
                         horiz=TRUE,
                         xpd=NA,
                         bty='n')
          }
          if(plot)
              plot.top.bottom(f.plot, f.legend)
          ## Named (by calendar year) median errors, not on the log scale.
          return(invisible(kappa.error))
      })
##' A generic function to plot and/or return the predicted and forecast calendar year effects for models in \pkg{BALD}.
##'
##' The calendar year effect is comprised of two components: 1) a prior expected value that may be unique to every cell (subject to weights and bounds) and 2) a diagonal-specific error term.
##' This function plots and returns the factor resulting from the combined effect of these two, which includes an autoregressive component if the model is estimated with such a feature.
##'
##' The first cell is \code{NA}. Values in the first column represent the rate of inflation/escalation to the corresponding cell from the cell in the same column but previous row.
##' Values in the 2nd column and beyond represent the rate of inflation/escalation to the corresponding cell from the cell in the same row but previous column.
##' See \code{vignette('BALD')}.
##'
##' @name calendarYearEffect
##' @param object The object from which to plot and/or return the calendar year effect.
##' @param restrictedSize A logical value. If \code{TRUE}, the plotted calendar year effect is restricted to the square of dimension equal to the observed triangle with which the model was estimated.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=calendarYearEffect,AnnualAggLossDevModelOutput-method]{calendarYearEffect("AnnualAggLossDevModelOutput")}}
##' \code{\link{calendarYearEffectErrors}}
##' \code{\link{autoregressiveParameter}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffectErrorTracePlot}}
##' @exportMethod calendarYearEffect
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' calendarYearEffect(standard.model.output)
##' }
## Generic dispatch only; no argument validation is performed here.
setGenericVerif('calendarYearEffect',
                function(object, restrictedSize=FALSE, plot=TRUE)
                    standardGeneric('calendarYearEffect'))
##' A method to plot and/or return predicted and forecast calendar year effects for models in \pkg{BALD}.
##'
##' The first cell is \code{NA}. Values in the first column represent the rate of inflation/escalation to the corresponding cell from the cell in the same column but previous row.
##' Values in the 2nd column and beyond represent the rate of inflation/escalation to the corresponding cell from the cell in the same row but previous column.
##'
##' @name calendarYearEffect,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOuput} from which to plot and/or return the calendar year effect.
##' @param restrictedSize A logical value. If \code{TRUE}, the plotted calendar year effect is restricted to the square of dimension equal to the observed triangle with which the model was estimated.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with the median predicted values (not on the log scale). Returned invisibly.
##' @docType methods
##' @seealso \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
##' \code{\link{autoregressiveParameter}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffectErrorTracePlot}}
setMethod('calendarYearEffect',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, restrictedSize, plot)
      {
          ## Median posterior calendar year effect.  The first cell has no
          ## identifiable effect by construction, so it is set to NA.
          ## Computed once and shared by the plot and the return value
          ## (previously it was fetched and masked twice).
          ans <- object@kappa@median
          ans[1,1] <- NA
          ## Label rows by exposure year.
          dimnames(ans)[[1]] <- min(object@input@exposureYears) - 1 + seq_len(dim(ans)[1])
          if(plot)
          {
              kappa <- ans
              if(restrictedSize)
              {
                  ## Restrict the plot to the square whose dimension matches
                  ## the observed triangle; drop=FALSE keeps matrix shape.
                  K <- getTriDim(object@input)[1]
                  kappa <- kappa[1:K, 1:K, drop=FALSE]
              }
              all.exp.years <- min(object@input@exposureYears) - 1 + seq_len(dim(kappa)[1])
              ## Long-format grid for lattice::levelplot; ylim reversed so the
              ## earliest exposure year appears at the top.
              data <- expand.grid(row=all.exp.years, column=seq_len(ncol(kappa)))
              data$z <- as.vector(as.numeric(kappa))
              print(
                  levelplot(z ~ column * row,
                            data,
                            aspect='iso',
                            ylim=(range(all.exp.years) + c(-0.5, 0.5))[c(2,1)],
                            col.regions=grey(seq(from=1, to=0, length.out=100)),
                            xlab='Development Year',
                            ylab=getExposureYearLabel(object@input)))
          }
          ## Named (rows by exposure year) median effect factors; invisible.
          return(invisible(ans))
      })
##' A generic function to plot predicted vs actual payments for models from the \pkg{BALD} package.
##'
##' Because the model is Bayesian, each estimated payment comes as a distribution.
##' The median of this distribution is used as a point estimate when plotting and/or returning values.
##' Note: One cannot calculate the estimated incremental payments from the estimated cumulative payments (and vice versa) since the median of sums need not be equal to the sum of medians.
##'
##' If \code{mergePredictedWithObserved=TRUE} and \code{type="incremental"}, then any observed incremental payment will be used in place of its corresponding incremental payment.
##' If \code{mergePredictedWithObserved=TRUE} and \code{type="cumulative"}, then only predicted incremental payments (by row) to the right of the last observed cumulative value will enter the calculation.
##' See \code{vignette('BALD')}.
##'
##' @name predictedPayments
##' @param object The object from which to plot predicted vs actual payments and from which to return predicted payments.
##' @param type A single character value specifying whether to plot/return the predicted incremental or cumulative payments. Valid values are \dQuote{incremental} or \dQuote{cumulative.} See details as to why these may not match up.
##' @param logScale A logical value. If \code{TRUE}, then values are plotted on a log scale.
##' @param mergePredictedWithObserved A logical value. See details.
##' @param plotObservedValues A logical value. If \code{FALSE}, then only the predicted values are plotted.
##' @param plotPredictedOnlyWhereObserved A logical value. If \code{TRUE}, then only the predicted incremental payments with valid corresponding observed (log) incremental payment are plotted. Ignored for \code{type="cumulative"}.
##' @param quantiles A vector of quantiles for the predicted payments to return. Useful for constructing credible intervals.
##' @param plot A logical value. If \code{TRUE}, then the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=predictedPayments,AnnualAggLossDevModelOutput-method]{predictedPayments("AnnualAggLossDevModelOutput")}}
##' @exportMethod predictedPayments
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' predictedPayments(standard.model.output)
##' }
## Generic dispatch only; argument validation (e.g. match.arg on "type")
## is left to the individual methods.
setGenericVerif('predictedPayments',
                function(object,
                         type=c('incremental', 'cumulative'),
                         logScale=TRUE,
                         mergePredictedWithObserved=FALSE,
                         plotObservedValues=logScale,
                         plotPredictedOnlyWhereObserved=FALSE,
                         quantiles=c(0.05, .5, 0.95),
                         plot=TRUE)
            {
                standardGeneric('predictedPayments')
            })
##' A method to plot predicted vs actual payments for models from the \pkg{BALD} package.
##'
##' Because the model is Bayesian, each estimated payment comes as a distribution.
##' The median of this distribution is used as a point estimate when plotting and/or returning values.
##' Note: One cannot calculate the estimated incremental payments from the estimated cumulative payments (and vice versa) since the median of sums need not be equal to the sum of medians.
##'
##' If \code{mergePredictedWithObserved=TRUE} and \code{type="incremental"}, then any observed incremental payment will be used in place of its corresponding incremental payment.
##' If \code{mergePredictedWithObserved=TRUE} and \code{type="cumulative"}, then only predicted incremental payments (by row) to the right of the last observed cumulative value will enter the calculation.
##'
##' @name predictedPayments,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot predicted vs actual payments and return predicted payments.
##' @param type A single character value specifying whether to plot/return the predicted incremental or cumulative payments. Valid values are "incremental" or "cumulative." See details as to why these may not match up.
##' @param logScale A logical value. If \code{TRUE}, then values are plotted on a log scale.
##' @param mergePredictedWithObserved A logical value. If \code{TRUE}, then the returned values treat observed incremental payments at "face value"; otherwise predicted values are used in place of observed values.
##' @param plotObservedValues A logical value. If \code{FALSE}, then only the predicted values are plotted.
##' @param plotPredictedOnlyWhereObserved A logical value. See details.
##' @param quantiles A vector of quantiles for the predicted payments to return. Useful for constructing credible intervals.
##' @param plot A logical value. If \code{TRUE}, then the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array (with the same structure as the input triangle) containing the predicted log incremental payments. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{predictedPayments}}
setMethod('predictedPayments',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, type, logScale, mergePredictedWithObserved, plotObservedValues, plotPredictedOnlyWhereObserved, quantiles, plot)
      {
          type <- match.arg(type)
          ## K = dimension of the observed (square) triangle.
          K <- getTriDim(object@input)[1]
          inc.obs <- object@input@incrementals
          cumul.obs <- object@input@cumulatives
          ## Posterior draws of incremental payments; dims 1 and 2 are
          ## exposure row and development column (apply over c(1,2) below);
          ## the remaining dims are sampling dims -- presumably iteration and
          ## chain (TODO confirm against the coda extraction upstream).
          inc.pred.coda <- slot(object@inc.pred, 'value')
          inc.pred.median <- object@inc.pred@median
          if(type == 'cumulative')
          {
              ## Cumulate the draws along the development dimension, draw by
              ## draw, since median-of-sums != sum-of-medians.
              cumul.pred.coda <- array(NA, dim(inc.pred.coda))
              cumul.pred.coda[,1,,] <- inc.pred.coda[,1,,]
              for(i in 2:dim(inc.pred.coda)[2])
                  cumul.pred.coda[,i,,] <- cumul.pred.coda[,i-1,,] + inc.pred.coda[,i,,]
              cumul.pred.median <- apply(cumul.pred.coda, c(1,2), median)
          }
          ##trim down the predicted values to the size of the original triangle
          inc.pred.median.trim <- inc.pred.median[1:K, 1:K]
          ##get rid of predicted values which do not correspond to observed values
          inc.pred.median.trim.only.where.obs <- inc.pred.median.trim
          inc.pred.median.trim.only.where.obs[inc.obs <= 0 | is.na(inc.obs)] <- NA
          if(type == 'incremental')
          {
              ans <- inc.pred.coda
              dimnames(ans)[[1]] <- min(object@input@exposureYears) - 1 + 1:dim(ans)[1]
              ## Take observed incrementals at face value: the K-by-K logical
              ## mask recycles across the sampling dims, overwriting every
              ## draw of each observed cell with the observed value.
              if(mergePredictedWithObserved)
                  ans[1:K, 1:K, , ][!is.na(inc.obs)] <- inc.obs[!is.na(inc.obs)]
              ans <- apply(ans, c(1, 2), quantile, quantiles)
          } else {
              if(!mergePredictedWithObserved)
              {
                  ans <- cumul.pred.coda
                  dimnames(ans)[[1]] <- min(object@input@exposureYears) - 1 + 1:dim(ans)[1]
                  ans <- apply(ans, c(1, 2), quantile, quantiles)
              } else {
                  ## Rebuild cumulatives anchored on the last observed
                  ## cumulative in each row, adding predicted incrementals
                  ## only to the right of it.  (A dimnames list shorter than
                  ## the number of dims is extended with NULLs by array().)
                  tmp <- array(NA,
                               dim(inc.pred.coda),
                               dimnames=list(min(object@input@exposureYears) - 1 + 1:dim(inc.pred.coda)[1], NULL))
                  tmp[1:K, 1:K, , ] <- cumul.obs
                  for(i in 1:K)
                  {
                      ## j.lower = last observed development column in row i
                      ## (or 1 when the row has no observed cumulatives).
                      j.lower <- which(!is.na(cumul.obs[i,]))
                      if(length(j.lower) == 0)
                      {
                          j.lower <- 1
                      } else {
                          j.lower <- max(j.lower)
                      }
                      ## NOTE(review): assumes dim(inc.pred.coda)[2] > j.lower
                      ## (at least one predicted column beyond the last
                      ## observed one); otherwise this ":" sequence would run
                      ## backwards -- TODO confirm extra dev years guarantee.
                      for(j in (j.lower+1):(dim(inc.pred.coda)[2]))
                          tmp[i,j,,] <- tmp[i,j-1,,] + inc.pred.coda[i,j,,]
                  }
                  ## Rows beyond the observed triangle carry purely predicted
                  ## cumulatives.  NOTE(review): assumes more exposure rows
                  ## than K exist -- TODO confirm.
                  tmp[(K+1):(dim(cumul.pred.coda)[1]), , , ] <- cumul.pred.coda[(K+1):(dim(cumul.pred.coda)[1]), , , ]
                  ans <- apply(tmp, c(1,2), quantile, probs = quantiles, na.rm = TRUE)
              }
          }
          ## Plot median predictions as lines (one color per exposure row),
          ## optionally overlaying observed values as points.
          plot.f <- function()
          {
              if(type == 'incremental')
              {
                  if(plotPredictedOnlyWhereObserved)
                  {
                      inc.pred.for.plot <- inc.pred.median.trim.only.where.obs
                  } else {
                      inc.pred.for.plot <- inc.pred.median
                  }
                  ## Non-positive values cannot be drawn on a log axis.
                  if(logScale)
                  {
                      inc.pred.for.plot[inc.pred.for.plot <= 0] <- NA
                      inc.obs.for.plot <- inc.obs
                      inc.obs.for.plot[inc.obs.for.plot <= 0] <- NA
                  } else {
                      inc.obs.for.plot <- inc.obs
                  }
                  x.range <- c(1, dim(inc.pred.for.plot)[2])
                  if(plotObservedValues)
                      y.range <- range(inc.pred.for.plot, inc.obs.for.plot, na.rm=TRUE)
                  else
                      y.range <- range(inc.pred.for.plot, na.rm=TRUE)
                  plot(x=x.range,
                       y=y.range,
                       ylab='Incremental Payments',
                       xlab='Development Year',
                       type='n',
                       log=ifelse(logScale, 'y', ''),
                       cex.axis=1.25,
                       cex.lab=1.25)
                  for(i in 1:dim(inc.pred.for.plot)[1])
                  {
                      tmp <- inc.pred.for.plot[i,]
                      lines(x=1:length(tmp),
                            y=tmp,
                            col=get.color(i),
                            type=ifelse(plotPredictedOnlyWhereObserved, 'o', 'l'),
                            pch=20)
                  }
                  if(plotObservedValues)
                  {
                      for(i in 1:dim(inc.obs.for.plot)[1])
                      {
                          tmp <- inc.obs.for.plot[i,]
                          lines(x=1:length(tmp),
                                y=tmp,
                                type='p',
                                col=get.color(i))
                      }
                  }
              } else {
                  cumul.pred.for.plot <- cumul.pred.median
                  ## Non-positive values cannot be drawn on a log axis.
                  if(logScale)
                  {
                      cumul.pred.for.plot[cumul.pred.for.plot <= 0] <- NA
                      cumul.obs.for.plot <- cumul.obs
                      cumul.obs.for.plot[cumul.obs.for.plot <= 0] <- NA
                  } else {
                      cumul.obs.for.plot <- cumul.obs
                  }
                  x.range <- c(1, dim(cumul.pred.for.plot)[2])
                  if(plotObservedValues)
                      y.range <- range(cumul.pred.for.plot, cumul.obs.for.plot, na.rm=TRUE)
                  else
                      y.range <- range(cumul.pred.for.plot, na.rm=TRUE)
                  plot(x=x.range,
                       y=y.range,
                       ylab='Cumulative Payments',
                       xlab='Development Year',
                       type='n',
                       log=ifelse(logScale, 'y', ''),
                       cex.axis=1.25,
                       cex.lab=1.25)
                  for(i in 1:dim(cumul.pred.for.plot)[1])
                  {
                      tmp <- cumul.pred.for.plot[i,]
                      lines(x=1:length(tmp),
                            y=tmp,
                            col=get.color(i))
                  }
                  if(plotObservedValues)
                  {
                      for(i in 1:dim(cumul.obs.for.plot)[1])
                      {
                          tmp <- cumul.obs.for.plot[i,]
                          lines(x=1:length(tmp),
                                y=tmp,
                                type='p',
                                col=get.color(i))
                      }
                  }
              }
          }
          ## Legend only needed when observed points are overlaid.
          legend.f <- function()
          {
              if(plotObservedValues)
              {
                  legend('center',
                         legend=c('Predicted','Observed'),
                         col='black',
                         lwd=2,
                         lty=c(1, NA),
                         pch=c(ifelse(type=='incremental' && plotPredictedOnlyWhereObserved, 20, NA), 1),
                         horiz=TRUE,
                         bty='n',
                         xpd=NA)
              }
          }
          if(plot)
              plot.top.bottom(plot.f, legend.f)
          ## Quantile array of predicted payments; invisible.
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior estimated standard deviation by development year.
##'
##' Aggregate loss development models in \pkg{BALD} allow for changes (by development year) in the measurement error around the log incremental payments.
##' This is a generic function that allows for the retrieval and illustration of this standard deviation.
##' See \code{vignette('BALD')}.
##'
##' @name standardDeviationVsDevelopmentTime
##' @param object The object from which to plot and/or return the estimated standard deviation by development year.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=standardDeviationVsDevelopmentTime,AnnualAggLossDevModelOutput-method]{standardDeviationVsDevelopmentTime("AnnualAggLossDevModelOutput")}}
##' @exportMethod standardDeviationVsDevelopmentTime
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' standardDeviationVsDevelopmentTime(standard.model.output)
##' }
## Generic dispatch only; no argument validation is performed here.
setGenericVerif('standardDeviationVsDevelopmentTime',
                function(object, plot=TRUE)
            {
                standardGeneric('standardDeviationVsDevelopmentTime')
            })
##' A method to plot and/or return the posterior estimated standard deviation by development year.
##'
##' Aggregate loss development models in \pkg{BALD} allow for changes (by development year) in the measurement error around the log incremental payments.
##' This is a method that allows for the retrieval and illustration of this standard deviation.
##'
##' @name standardDeviationVsDevelopmentTime,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the estimated standard deviation by development year.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a numeric vector of the plotted statistics. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{standardDeviationVsDevelopmentTime}}
setMethod('standardDeviationVsDevelopmentTime',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plot)
      {
          ## K = dimension of the observed (square) triangle.
          K <- getTriDim(object@input)[1]
          ## Posterior draws of the skewness (beta), degrees of freedom (v),
          ## and per-development-year scale (h) parameters.
          beta <- slot(object@beta, 'value')[1,,]
          v <- slot(object@df, 'value')[1,,]
          h <- slot(object@h, 'value')
          ## 95%/50%/5% posterior quantiles of the standard deviation, one
          ## column per development year.
          st.d <- array(NA, c(3, K), list(c('95%', '50%', '5%'), NULL))
          for(i in 1:K)
          {
              ## Presumably the closed-form standard deviation of the (skew-)t
              ## measurement error in terms of scale h, skew beta, and df v;
              ## the second term requires v > 4 -- TODO confirm against the
              ## model's distributional assumptions.
              tmp <- quantile(
                              sqrt(h[i,,] ^ 2 * v / (v - 2) + 2 * beta ^ 2 * h[i,,] ^ 4 * v ^ 2 / (v - 2) ^ 2 / (v - 4)),
                              c(.95, .5, .05))
              st.d[names(tmp),i] <- tmp
          }
          ## Median (solid black) with 90% credible interval (dashed gray).
          f.plot <- function()
          {
              matplot(y=t(st.d),
                      x=1:K,
                      col=c('grey', 'black', 'grey'),
                      lwd=c(3,2,3),
                      lty=c(2,1,2),
                      xlab='Development Year',
                      ylab="Standard Deviation in Measurement Equation",
                      cex.axis=1.25,
                      cex.lab=1.25,
                      type='l')
          }
          ## Legend drawn in a separate panel.
          f.legend <- function()
          {
              legend('center',
                     legend=c('Median','90 Percent Credible Interval'),
                     col=c('black', 'grey'),
                     lwd=c(2,3),
                     lty=c(1,2),
                     horiz=TRUE,
                     bty='n',
                     xpd=NA)
          }
          if(plot)
              plot.top.bottom(f.plot, f.legend)
          ## 3-by-K quantile array of the plotted statistics; invisible.
          return(invisible(st.d))
      })
##' A generic function to generate the trace plots for select exposure growth rates.
##' See \code{vignette('BALD')}.
##'
##' @name exposureGrowthTracePlot
##' @param object The object from which to generate the trace plots.
##' @param elements A numeric vector indicating the elements for which to plot the trace. Valid values are 2 through the total number of exposure years. If NULL, values are selected automatically.
##' @return NULL invisibly. Only called for the side effect of plotting.
##' @seealso \code{\link[=exposureGrowthTracePlot,AnnualAggLossDevModelOutput-method]{exposureGrowthTracePlot("AnnualAggLossDevModelOutput")}}
##' \code{\link{exposureGrowth}}
##' @exportMethod exposureGrowthTracePlot
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' exposureGrowthTracePlot(standard.model.output)
##' }
## Generic dispatch only; element validation is handled by the methods.
setGenericVerif('exposureGrowthTracePlot',
                function(object, elements=NULL)
            {
                standardGeneric('exposureGrowthTracePlot')
            })
##' A method to generate the trace plots for select exposure growth rates.
##'
##'
##' @name exposureGrowthTracePlot,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to generate the trace plots.
##' @param elements A numeric vector indicating the elements for which to plot the trace. If NULL, values are selected automatically.
##' @return NULL invisibly. Only called for the side effect of plotting.
##' @docType methods
##' @seealso \code{\link{exposureGrowthTracePlot}}
##' \code{\link{exposureGrowth}}
setMethod('exposureGrowthTracePlot',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, elements)
      {
          ## Valid element indices: 2 through the total number of exposure
          ## years (the first exposure year has no growth rate).
          lb <- 2
          ub <- object@input@totalExpYears
          if(is.null(elements))
          {
              ## Default selection: first, middle, and last valid elements.
              elements <- c(lb, floor((lb + ub) / 2), ub)
          } else {
              if(!is.numeric(elements) || !is.vector(elements))
                  stop('"elements" must either be "NULL" or a numeric vector')
              ## Explicit NA check: comparing NA against its integer coercion
              ## below would otherwise yield NA and a cryptic
              ## "missing value where TRUE/FALSE needed" error.
              if(anyNA(elements))
                  stop('"elements" must not contain missing values')
              if(any(elements != as.integer(elements)))
                  stop('"elements" must be coercible to integers')
              if(any(elements > ub) || any(elements < lb))
                  stop(paste('"elements" must be at most the total number of exposure years (', ub,') and at least 2', sep=''))
          }
          ## One trace plot per selected exposure-growth element.
          plot.trace.plots(slot(object@eta, 'value')[elements,,], paste0('Exposure Growth :', elements))
      })
##' A generic function to generate the trace plots for select calendar year effect errors.
##'
##' The calendar year effect is comprised of two components: 1) a prior expected value that may be unique to every cell and 2) a diagonal-specific error term.
##' This function generates trace plots for the diagonal specific error terms only.
##' See \code{vignette('BALD')}.
##'
##' @name calendarYearEffectErrorTracePlot
##' @param object The object from which to generate the trace plots.
##' @param elements A numeric vector indicating the elements for which to plot the trace. Valid values are 2 through the total number of exposure years (observed and forecast). If NULL, values are selected automatically.
##' @return NULL invisibly. Only called for the side effect of plotting.
##' @seealso \code{\link[=calendarYearEffectErrorTracePlot,AnnualAggLossDevModelOutput-method]{calendarYearEffectErrorTracePlot("AnnualAggLossDevModelOutput")}}
##' \code{\link{calendarYearEffectErrors}}
##' @exportMethod calendarYearEffectErrorTracePlot
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' calendarYearEffectErrorTracePlot(standard.model.output)
##' }
## Generic: dispatch only; argument handling is done by the methods.
setGenericVerif('calendarYearEffectErrorTracePlot',
                function(object, elements=NULL)
            {
                standardGeneric('calendarYearEffectErrorTracePlot')
            })
##' A method to generate the trace plots for select calendar year effect errors.
##'
##' @name calendarYearEffectErrorTracePlot,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to generate the trace plots.
##' @param elements A numeric vector indicating the elements for which to plot the trace. Valid values are 2 through the total number of exposure years. If NULL, values are selected automatically.
##' @return NULL invisibly. Only called for the side effect of plotting.
##' @docType methods
##' @seealso \code{\link{calendarYearEffectErrorTracePlot}}
##' \code{\link{calendarYearEffectErrors}}
setMethod('calendarYearEffectErrorTracePlot',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, elements)
      {
          ## Valid user-supplied elements run from 2 through the total number of
          ## exposure years (observed and forecast), per the documented contract.
          lb <- 2
          ub <- object@input@totalExpYears
          if(is.null(elements))
          {
              ## Default selection: first, middle, and last valid elements.
              elements <- c(lb, floor((lb + ub) / 2), ub)
          } else {
              if(!is.numeric(elements) || !is.vector(elements))
                  stop('"elements" must either be "NULL" or a numeric vector')
              if(any(elements != as.integer(elements)))
                  stop('"elements" must be coercible to integers')
              if(any(elements > ub) || any(elements < lb))
                  ## Fixed: the lower bound enforced above is lb (2); the message
                  ## previously (and incorrectly) said "at least 3".
                  stop(paste('"elements" must be at most the total number of exposure years (', ub,') and at least 2', sep=''))
          }
          ## In the model file, the first identifiable calendar year effect is numbered 3,
          ## so shift the user-facing indices (which start at 2) by one.
          elements <- elements + 1
          ## kappa.log.error is stored on the log scale; exp() - 1 converts to a rate.
          plot.trace.plots(exp(slot(object@kappa.log.error, 'value')[elements,,]) - 1, paste('Calendar Year Effect Error :', elements, sep=''))
      })
##' A generic function to plot and/or return a table of predicted age-to-age loss development factors (or link ratios).
##'
##' While the model estimates ultimate losses directly, comparisons of predicted to observed development factors can give the user a better feel for the model's adequacy.
##' Since the model is Bayesian, each development factor comes as a distribution. Only the median, as a point estimate, is plotted/returned.
##'
##' The age-to-age factors are the ratios of the cumulative paid values at one period to the previous period.
##' Note that the median of products is not the product of medians, and thus it is possible (or rather likely) that age-to-age factors will not line up with age-to-ultimate factors (see \code{\link{tailFactor}}).
##' See \code{vignette('BALD')}.
##'
##' @name lossDevelopmentFactors
##' @param object The object from which to plot and/or return loss development factors.
##' @param cex.text The \code{cex} value supplied to \code{text}. Adjusts the relative size of text.
##' @param linespace Adjusts the spacing between observed and predicted values.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a numeric matrix of plotted statistics.
##' @seealso \code{\link[=lossDevelopmentFactors,AnnualAggLossDevModelOutput-method]{lossDevelopmentFactors("AnnualAggLossDevModelOutput")}}
##' \code{\link{tailFactor}}
##' @exportMethod lossDevelopmentFactors
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' lossDevelopmentFactors(standard.model.output)
##' }
setGenericVerif('lossDevelopmentFactors',
                function(object, cex.text=.77, linespace=0.5, plot=TRUE)
            {
                ## Validate the graphical tuning parameters before dispatching.
                if(length(cex.text) != 1 || !is.numeric(cex.text))
                    stop('"cex.text" must be numeric of length 1')
                if(length(linespace) != 1 || !is.numeric(linespace))
                    stop('"linespace" must be numeric of length 1')
                standardGeneric('lossDevelopmentFactors')
            })
##' A method to plot and/or return a table of predicted age-to-age loss development factors (or link ratios).
##'
##' @name lossDevelopmentFactors,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the factors.
##' @param cex.text The \code{cex} value supplied to \code{text}. Adjusts the relative size of text.
##' @param linespace Adjusts the spacing between observed and predicted values.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting, but also returns a numeric matrix of plotted statistics.
##' @docType methods
##' @seealso \code{\link{lossDevelopmentFactors}}
##' \code{\link{tailFactor}}
setMethod('lossDevelopmentFactors',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, cex.text, linespace, plot)
      {
          ## K is the number of development years (columns) in the observed triangle.
          K <- getTriDim(object@input)[1]
          obs.exp.years <- object@input@exposureYears
          ## seq_len() guards the zero-extra-years case, where 1:0 would yield c(1, 0).
          pred.exp.years <- max(obs.exp.years) + seq_len(object@input@totalExpYears - K)
          all.exp.years <- c(obs.exp.years, pred.exp.years)
          ey.type <- object@input@triangleType

          ## Observed age-to-age factors, padded with NA rows for the forecast exposure years.
          obs.ldf <- object@input@cumulatives[,-1] / object@input@cumulatives[,-K]
          obs.ldf <- rbind(obs.ldf, array(NA, c(length(pred.exp.years), dim(obs.ldf)[2])))

          ## Posterior medians of the predicted age-to-age factors.  The median is
          ## taken factor-by-factor, so products of these medians need not match
          ## median age-to-ultimate factors (see tailFactor).
          ldf.pred <- array(NA, dim(obs.ldf))
          inc.pred <- slot(object@inc.pred, 'value')
          current.loss <- inc.pred[, 1, , ]
          for(j in seq_len(dim(ldf.pred)[2]))
          {
              next.loss <- current.loss + inc.pred[, j + 1, , ]
              ldf.pred[,j] <- apply(next.loss / current.loss, 1, median, na.rm = TRUE)
              current.loss <- next.loss
          }
          dimnames(ldf.pred) <- list(all.exp.years, NULL)

          if(plot)  ## Fixed: the "plot" argument was previously accepted but silently ignored.
          {
              col.grey <- grey(0.45)
              if(ey.type=='py')
              {
                  sub <- 'First Column is Link From Half to First'
              } else {
                  sub <- 'First Column is Link From First to Second'
              }
              plot(x=range(1:(K-1)) + c(-0.5, 0.5),
                   y=(range(obs.exp.years, pred.exp.years) + c(-0.5, 0.5))[c(2,1)],
                   ylim=(range(obs.exp.years, pred.exp.years) + c(-0.5, 0.5))[c(2,1)],
                   ylab=getExposureYearLabel(object@input),
                   xlab='Development Year',
                   cex.axis=1.25,
                   cex.lab=1.25,
                   type="n",
                   font.main=1,
                   cex.main=1.5,
                   sub=sub)
              ## Black text: predicted (median) factors.  Grey text: observed factors
              ## (a dash marks cells with no observed value).
              for (i in seq_along(all.exp.years))
              {
                  for (j in 1:(K-1))
                  {
                      text(x=j,
                           y=all.exp.years[i],
                           as.character(format(ldf.pred[i,j], digits=4, nsmall=3)),
                           font=1,
                           cex=cex.text,
                           col='black')
                      text(x=j,
                           y=all.exp.years[i]+linespace,
                           ifelse(is.na(obs.ldf[i,j]), '-', as.character(format(obs.ldf[i,j], digits=4, nsmall=3))),
                           font=1,
                           cex=cex.text,
                           col=col.grey)
                  }
              }
          }

          ## Fixed: return the matrix of predicted (median) factors, as documented;
          ## previously the function returned nothing.
          return(invisible(ldf.pred))
      })
##' A generic function to plot the trace plots for select rate of decay values.
##' See \code{vignette('BALD')}.
##'
##' @name rateOfDecayTracePlot
##' @param object The object from which to generate the trace plots.
##' @param elements A numeric vector indicating for which elements to plot the trace. Valid values are 2 through the number of columns in the observed triangle. If NULL, values are selected automatically.
##' @param \dots Additional arguments used by methods.
##' @return NULL invisibly. Only called for the side effect of plotting.
##' @seealso \code{\link[=rateOfDecayTracePlot,StandardAnnualAggLossDevModelOutput-method]{rateOfDecayTracePlot("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link{rateOfDecay}}
##' @exportMethod rateOfDecayTracePlot
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' rateOfDecayTracePlot(standard.model.output)
##' }
## Generic: dispatch only; "..." is forwarded to the methods.
setGenericVerif('rateOfDecayTracePlot',
                function(object, elements=NULL, ...)
            {
                standardGeneric('rateOfDecayTracePlot')
            })
##' A generic function to generate the trace plots for select consumption path values.
##' See \code{vignette('BALD')}.
##'
##' @name consumptionPathTracePlot
##' @param object The object from which to generate the trace plots.
##' @param elements A numeric vector indicating the elements for which to plot the trace. Valid values are 1 through the number of development years (columns) in the observed triangle. If NULL, values are selected automatically.
##' @param \dots Additional arguments used by methods.
##' @return NULL invisibly. Only called for the side effect of plotting.
##' @seealso \code{\link[=consumptionPathTracePlot,StandardAnnualAggLossDevModelOutput-method]{consumptionPathTracePlot("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link{consumptionPath}}
##' @exportMethod consumptionPathTracePlot
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' consumptionPathTracePlot(standard.model.output)
##' }
## Generic: dispatch only; "..." is forwarded to the methods.
setGenericVerif('consumptionPathTracePlot',
                function(object, elements=NULL, ...)
            {
                standardGeneric('consumptionPathTracePlot')
            })
##' A generic function to plot and/or return the estimated consumption path vs development year time.
##'
##' At the heart of aggregate loss development models in \pkg{BALD} is the consumption path.
##' The consumption path is (on a log scale) the trajectory of incremental payments absent calendar year effects and with exposure normalized to the first row.
##' Note that the measurement error term is (possibly) a skewed \eqn{t} and as such (possibly) has a non zero mean. The consumption path is absent any such shifts due to skewness.
##' This is a generic function that allows for the retrieval and illustration of this consumption path.
##' See \code{vignette('BALD')}.
##'
##' @name consumptionPath
##' @param object The object from which to plot and/or return the estimated consumption path.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns the plotted statistics. Returned invisibly.
##' @seealso \code{\link[=consumptionPath,StandardAnnualAggLossDevModelOutput-method]{consumptionPath("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link[=consumptionPath,BreakAnnualAggLossDevModelOutput-method]{consumptionPath("BreakAnnualAggLossDevModelOutput")}}
##' \code{\link{consumptionPathTracePlot}}
##' @exportMethod consumptionPath
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' consumptionPath(standard.model.output)
##' }
## Generic: dispatch only; plotting behavior is implemented by the methods.
setGenericVerif('consumptionPath',
                function(object, plot=TRUE)
            {
                standardGeneric('consumptionPath')
            })
##' A generic function to plot and/or return the estimated rate of decay vs development year time.
##'
##' The simplest definition of the rate of decay is the exponentiated first difference of the \link[=consumptionPath]{consumption path}.
##' This is a generic function to allow for the retrieval and illustration of the rate of decay.
##' See \code{vignette('BALD')}.
##'
##' @name rateOfDecay
##' @param object The object from which to plot and/or return the estimated rate of decay.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns the plotted statistics. Returned invisibly.
##' @seealso \code{\link[=rateOfDecay,StandardAnnualAggLossDevModelOutput-method]{rateOfDecay("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link[=rateOfDecay,BreakAnnualAggLossDevModelOutput-method]{rateOfDecay("BreakAnnualAggLossDevModelOutput")}}
##' \code{\link{consumptionPath}}
##' \code{\link{rateOfDecayTracePlot}}
##' @exportMethod rateOfDecay
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' rateOfDecay(standard.model.output)
##' }
## Generic: dispatch only; plotting behavior is implemented by the methods.
setGenericVerif('rateOfDecay',
                function(object, plot=TRUE)
            {
                standardGeneric('rateOfDecay')
            })
##' A generic function to plot and/or return the predicted tail factors for a specific attachment point.
##'
##' The tail factor is the ratio of the estimated ultimate loss to cumulative loss at some point in development time.
##' This is a generic function to allow for the retrieval and illustration the tail factor by exposure year.
##'
##' \bold{Note on \code{firstIsHalfReport} and \code{attachment}:} \code{firstIsHalfReport} refers to the first column of the triangle.
##' For policy year triangles, the first column is often referred to as a \dQuote{half-report}, the second column is called \dQuote{first-report}, the third column is called \dQuote{second-report}, etc.
##' If \code{firstIsHalfReport=TRUE}, then \code{tailFactor} will assume the triangle is arranged in such a way that the first column is the \dQuote{half-report}
##' and \code{attachment=1} indicates that the charted tail factor attaches at the cumulative loss through the second column. If \code{firstIsHalfReport=FALSE},
##' then \code{attachment=1} indicates that the charted tail factor attaches at the cumulative loss through the first column. Since \code{attachment} must be coercible to an integer,
##' it is impossible to plot half-to-ultimate tail factors; however, they are the first column in the returned matrix.
##'
##' \code{firstIsHalfReport} can be \code{NA} (the default)
##' if the exposure year type was specified to be one of \dQuote{policy year} or \dQuote{accident year} at the time the input object was constructed (see \code{\link{makeStandardAnnualInput}}
##' or \code{\link{makeBreakAnnualInput}}). An exposure year type of \dQuote{policy year} corresponds to \code{firstIsHalfReport=TRUE},
##' and an exposure year type of \dQuote{accident year} corresponds to \code{firstIsHalfReport=FALSE}. Setting \code{firstIsHalfReport} to a non-missing value will override this default.
##'
##' If \code{expYearRange} is \dQuote{fullyObs}, then only exposure years with a non missing value in the first column will be plotted.
##' See \code{vignette('BALD')}.
##'
##' @name tailFactor
##' @param object The object from which to plot the predicted tail factors and return tail factors for \emph{all} attachment points.
##' @param attachment An integer value specifying the attachment point for the tail. Must be at least 1. See Details for more information.
##' @param useObservedValues A logical value. If \code{TRUE}, observed values are substituted for predicted values whenever possible in the calculation. If \code{FALSE}, only predicted values are used.
##' @param firstIsHalfReport A logical value or \code{NA}. See Details for more info.
##' @param finalAttachment An integer value must be at least 1. Default value is \code{attachment}. A call to \code{tailFactor} will return (invisibly) a matrix of tail factors through this value.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @param expYearRange Either a range of years (for example c(1995, 2006)) or one of the keywords \dQuote{all} or \dQuote{fullyObs}.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=tailFactor,StandardAnnualAggLossDevModelOutput-method]{tailFactor("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link[=tailFactor,BreakAnnualAggLossDevModelOutput-method]{tailFactor("BreakAnnualAggLossDevModelOutput")}}
##' @exportMethod tailFactor
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' tailFactor(standard.model.output,10)
##' }
setGenericVerif('tailFactor',
                function(object, attachment, useObservedValues=FALSE, firstIsHalfReport=NA, finalAttachment=attachment, plot=TRUE, expYearRange='all')
            {
                ## "attachment" must be a single, non-missing, integer-valued number of at least 1.
                if(!is.numeric(attachment))
                    stop('"attachment" must be numeric')
                if(length(attachment) != 1L)
                    stop('"attachment" must be of length 1')
                if(is.na(attachment))
                    stop('"attachment" cannot be NA')
                if(attachment != as.integer(attachment))
                    stop('"attachment" must be coercible to an integer')
                if(attachment < 1)
                    stop('"attachment" must be at least 1')

                ## "finalAttachment" obeys the same requirements and may not precede "attachment".
                if(!is.numeric(finalAttachment))
                    stop('"finalAttachment" must be numeric')
                if(length(finalAttachment) != 1L)
                    stop('"finalAttachment" must be of length 1')
                if(is.na(finalAttachment))
                    stop('"finalAttachment" cannot be NA')
                if(finalAttachment != as.integer(finalAttachment))
                    stop('"finalAttachment" must be coercible to an integer')
                if(finalAttachment < 1)
                    stop('"finalAttachment" must be at least 1')
                if(finalAttachment < attachment)
                    stop('"finalAttachment" must be at least equal to "attachment"')

                ## "expYearRange" is either one of the keywords 'all'/'fullyObs' or an
                ## integer pair; resolving it to an actual range of years is left to
                ## the methods (which know the available exposure years).
                if(is.character(expYearRange))
                {
                    if(length(expYearRange) != 1L)
                        stop('"expYearRange" must be of length one if it is a character')
                    if(expYearRange != 'all' && expYearRange != 'fullyObs')
                        stop('"expYearRange" must be one of "all" or "fullyObs" if it is supplied as a character')
                } else {
                    if(any(as.integer(expYearRange) != expYearRange))
                        stop('"expYearRange" must be supplied as an integer')
                    if(length(expYearRange) != 2L)
                        stop('"expYearRange" must have length 2')
                }

                standardGeneric('tailFactor')
            })
##' A generic function to plot and/or return the posterior number of knots.
##'
##' The \link[=consumptionPath]{consumption path} (or calendar year effect and exposure growth adjusted log incremental payments) is modeled as a linear spline.
##' The number of knots (or places where the spline changes slope) in this spline is endogenous to the model and estimated by way of Reversible Jump Markov Chain Monte Carlo simulation.
##' See \code{vignette('BALD')}.
##'
##' @name numberOfKnots
##' @param object The object from which to plot the number of knots.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns statistics on the number of knots. Returned invisibly.
##' @seealso \code{\link{consumptionPath}}
##' \code{\link[=numberOfKnots,StandardAnnualAggLossDevModelOutput-method]{numberOfKnots("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link[=numberOfKnots,BreakAnnualAggLossDevModelOutput-method]{numberOfKnots("BreakAnnualAggLossDevModelOutput")}}
##' @exportMethod numberOfKnots
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' numberOfKnots(standard.model.output)
##' }
## Generic: dispatch only; plotting behavior is implemented by the methods.
setGenericVerif('numberOfKnots',
                function(object, plot=TRUE)
                standardGeneric('numberOfKnots'))
##' A generic function to plot autocorrelations found in the \acronym{MCMC} samples for select parameters.
##'
##' Chains with high autocorrelation require a longer burnin and more samples to fully explore the parameter space.
##' See \code{vignette('BALD')}.
##'
##' @name mcmcACF
##' @param object The object from which to plot autocorrelations.
##' @return Called for the side effect of plotting.
##' @seealso \code{\link[=mcmcACF,StandardAnnualAggLossDevModelOutput-method]{mcmcACF("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link[=mcmcACF,BreakAnnualAggLossDevModelOutput-method]{mcmcACF("BreakAnnualAggLossDevModelOutput")}}
##' @exportMethod mcmcACF
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' mcmcACF(standard.model.output)
##' }
## Generic: dispatch only; autocorrelation plotting is implemented by the methods.
setGenericVerif('mcmcACF',
                function(object)
                standardGeneric('mcmcACF'))
| /R/AnnualAggLossDevModelOutput.R | no_license | cran/BALD | R | false | false | 167,147 | r | ##################################################################################################
## ##
## BALD is an R-package. ##
## It is a Bayesian time series model of loss development. ##
## Features include skewed Student-t distribution with time-varying scale parameters, ##
## an expert prior for the calendar year effect, ##
## and accommodation for structural breaks in the consumption path of development years. ##
## It is an update for the older package lossDev as it has been stopped supported. ##
## ##
## Copyright (c) 2018 Frank A. Schmid, ##
## ##
## This file is part of BALD. ##
## ##
## lossDev is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <https://www.gnu.org/licenses/>. ##
## ##
##################################################################################################
##' @include zzz.R
##' @include NodeOutput.R
##' @include LossDevModelOutput.R
NULL
##' The base output class for all aggregate annual models.
##'
##' \code{AnnualAggLossDevModelOutput} is the base output class for all aggregate annual model objects.
##' Derived classes should contain all output from a \acronym{JAGS} run of the input object in the slot \dQuote{input}.
##' Currenly only the slot \dQuote{input} is allowed to be a non-model node. All other nodes should be the exact name of some settable node in the model.
##' This is because \code{getModelOutputNodes} currently looks at the slot names to determine what values to set; only slot \dQuote{input} is known to be a slot other than a settable node.
##' This class is derived from \code{LossDevModelOutput}
##' @name AnnualAggLossDevModelOutput-class
##' @docType class
##' @seealso \code{\linkS4class{LossDevModelOutput}}
## Formal S4 class definition.  With the exception of 'input' (inherited from
## LossDevModelOutput), every slot name below must exactly match the name of a
## settable node in the JAGS model: getModelOutputNodes derives the list of
## values to set from these slot names (see the class documentation above).
setClass(
    'AnnualAggLossDevModelOutput',
    representation(inc.pred='NodeOutput',            # predicted incremental payments (used by finalCumulativeDiff/QQPlot)
                   eta='NodeOutput',                 # exposure growth rate (plotted by exposureGrowth)
                   eta.mu='NodeOutput',              # stationary mean of the exposure-growth process
                   sigma.eta='NodeOutput',
                   sigma.kappa='NodeOutput',
                   kappa.log.error='NodeOutput',
                   rho='NodeOutput',
                   rho.eta='NodeOutput',
                   h='NodeOutput',                   # scale term; h^2 * v/(v-2) is used as the variance in triResi
                   sigma.h.2.log.innov='NodeOutput',
                   beta='NodeOutput',                # skewness parameter (see skewnessParameter)
                   df='NodeOutput',                  # degrees of freedom of the (skew-)t measurement error
                   k='NodeOutput',
                   mu.upper.left='NodeOutput',       # posterior location of log incrementals in the observed triangle
                   a.ou='NodeOutput',
                   b.ou='NodeOutput',
                   stoch.log.inf.pred='NodeOutput',
                   kappa='NodeOutput',
                   delta.tail='NodeOutput',
                   #omega.obs='NodeOutput',
                   'VIRTUAL'),                       # virtual: only derived model-output classes are instantiated
    contains='LossDevModelOutput')
##' A generic function to plot and/or return the posterior predicted exposure growth (corresponding to \emph{eta} in the model). See \code{vignette('BALD')}.
##'
##' @name exposureGrowth
##' @param object The object from which to plot and/or return the posterior predicted exposure growth.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting the exposure growth. Also returns a named numeric vector for the median of the posterior for the exposure growth on the real (not log) scale. Returned invisibly.
##' @seealso \code{\link[=exposureGrowth,AnnualAggLossDevModelOutput-method]{exposureGrowth("AnnualAggLossDevModelOutput")}}
##' \code{\link{exposureGrowthTracePlot}}
##' @exportMethod exposureGrowth
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(
##' as.integer(dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' exposureGrowthTracePlot(standard.model.output)
##' }
## Register the 'exposureGrowth' generic via the package's verified
## registration helper.
setGenericVerif(
    'exposureGrowth',
    function(object, plot=TRUE) {
        standardGeneric('exposureGrowth')
    })
##' A method to plot and/or return the posterior predicted exposure growth (corresponding to \emph{eta} in the model).
##'
##' @name exposureGrowth,AnnualAggLossDevModelOutput-method
##' @param object The object from which to plot and/or return the posterior predicted exposure growth.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting the exposure growth. Also returns a named numeric vector for the median of the posterior for the exposure growth on the real (not log) scale. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{exposureGrowth}}
##' \code{\link{exposureGrowthTracePlot}}
setMethod('exposureGrowth',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plot)
      {
          ## Dimension (rows = exposure years) of the observed triangle.
          K <- getTriDim(object@input)[1]
          ## Posterior medians of eta; the first element is dropped so the
          ## vector lines up with the exposure years below (growth is only
          ## defined from the second year on).
          eta <- object@eta@median[-1]
          obs.years <- object@input@exposureYears[-1]
          ## Years beyond the last observed exposure year covered by the
          ## remaining (predicted) elements of eta.
          pred.years <- 1:(length(eta) - (K-1)) + max(obs.years)
          ## Split eta into the observed-period and predicted-period pieces.
          eta.obs <- eta[1:length(obs.years)]
          eta.pred <- eta[1:length(pred.years) + length(obs.years)]
          ## Return value: growth medians named by exposure year.
          ans <- c(eta.obs, eta.pred)
          names(ans) <- c(obs.years, pred.years)
          if(plot)
          {
              f.plot <- function()
              {
                  ## Empty frame spanning both observed and predicted years.
                  plot(
                       x=range(obs.years, pred.years),
                       y=range(eta),
                       xlab=getExposureYearLabel(object@input),
                       ylab="Rate of Exposure Growth (Net of Calendar Year Effect)",
                       type='n',
                       cex.axis=1.25,
                       cex.lab=1.25)
                  ## Observed growth: solid line, open circles.
                  lines(
                        x=obs.years,
                        y=eta.obs,
                        type='o',
                        lty=1,
                        pch=1,
                        lwd=1)
                  ## Predicted growth: dotted line, filled circles.
                  lines(
                        x=pred.years,
                        y=eta.pred,
                        type='o',
                        lty=3,
                        pch=20,
                        lwd=2)
                  ## Horizontal reference at the posterior median of the
                  ## stationary mean (eta.mu), converted from the log scale to
                  ## a growth rate via exp(.) - 1.
                  abline(h=median(exp(slot(object@eta.mu, 'value')) - 1),
                         col='black',
                         lwd=2,
                         lty=2)
              }
              f.legend <- function()
              {
                  legend('center',
                         c('Rate of Exposure Growth','Future Rate of Growth','Stationary Mean'),
                         col=c('black','black','black'),
                         lwd=c(1,2,2),
                         pch=c(1,20,NA),
                         lty=c(1,3,2),
                         horiz=TRUE,
                         bty='n',
                         xpd=NA)
              }
              ## Shared layout helper: plot on top, legend strip below.
              plot.top.bottom(f.plot, f.legend)
          }
          return(invisible(ans))
      })
##' A generic function to plot and/or return the difference between final actual and predicted cumulative payments. See \code{vignette('BALD')}.
##'
##' @name finalCumulativeDiff
##' @param object The object from which to plot and/or return the difference.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @param expYearRange Either a range of years (for example c(1995, 2006)) or one of the keywords \dQuote{all} or \dQuote{fullyObs}.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=finalCumulativeDiff,AnnualAggLossDevModelOutput-method]{finalCumulativeDiff("AnnualAggLossDevModelOutput")}}
##' @exportMethod finalCumulativeDiff
##' @examples
##' rm(list=ls())
##' library(BALD)
##' options(device.ask.default=FALSE)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' finalCumulativeDiff(standard.model.output)
##' }
## Register the 'finalCumulativeDiff' generic via the package's verified
## registration helper.
setGenericVerif(
    'finalCumulativeDiff',
    function(object, plot=TRUE, expYearRange='all') {
        standardGeneric('finalCumulativeDiff')
    })
##' A method to plot and/or return the difference between final actual and predicted cumulative payments.
##'
##' The relative difference (x/y - 1) between the final observed cumulative payment and the corresponding predicted cumulative payment is plotted for each exposure year.
##' The horizontal lines of each box represent (starting from the top) the 90th, 75th, 50th, 25th, and 10th percentiles. Exposure years in which all cumulative payments are \code{NA} are omitted.
##'
##' If \code{expYearRange} is \dQuote{fullyObs}, then only exposure years with a non missing value in the first column will be plotted.
##'
##' @name finalCumulativeDiff,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the difference between final actual and predicted cumulative payments.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @param expYearRange Either a range of years (for example c(1995, 2006)) or one of the keywords \dQuote{all} or \dQuote{fullyObs}.
##' @return Mainly called for the side effect of plotting the difference between final actual and predicted cumulative payments by exposure year. Also returns a named array for the percentiles in the plot. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{finalCumulativeDiff}}
setMethod('finalCumulativeDiff',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plot, expYearRange)
      {
          K <- getTriDim(object@input)[1]
          ## Posterior draws of predicted incrementals restricted to the
          ## K x K observed-triangle region; the last two dims index draws.
          inc.pred.coda <- slot(object@inc.pred, 'value')[1:K, 1:K,,]
          cumulatives <- object@input@cumulatives
          exp.years <- object@input@exposureYears
          ## Resolve 'expYearRange' into a numeric (min, max) pair,
          ## validating the keyword or integer-range forms.
          if(is.character(expYearRange))
          {
              if(length(expYearRange) != 1)
                  stop('"expYearRange" must be of length one if it is a character')
              if(expYearRange != 'all' && expYearRange != 'fullyObs')
                  stop('"expYearRange" must be one of "all" or "fullyObs" if it is supplied as a character')
              if(expYearRange == 'all')
                  expYearRange <- range(exp.years)
              else
                  ## 'fullyObs': keep only years whose first column is observed.
                  expYearRange <- range(exp.years[which(!is.na(cumulatives[,1]))])
          } else {
              if(!all(as.integer(expYearRange) == expYearRange))
                  stop('"expYearRange" must be supplied as an integer')
              if(length(expYearRange) != 2)
                  stop('"expYearRange" must have length 2')
              if(max(exp.years) < max(expYearRange) || min(exp.years) > min(expYearRange))
                  stop('"expYearRange" must be a subset of the actual exposure years')
          }
          ## One column of quantiles (10/25/50/75/90%) per exposure year.
          cumulative.resi.stats <- array(NA, c(5, K), dimnames=list(c('10%', '25%', '50%', '75%', '90%'), exp.years))
          for(i in 1:K)
          {
              tmp <- which(!is.na(cumulatives[i,]))
              ## Rows with no observed cumulatives are left as NA.
              if(length(tmp) == 0)
              {
                  cumulative.resi.stats[,i] <- NA
                  next
              }else{
                  last.obs.cumulative.column <- max(tmp)
                  ## Relative difference (actual/predicted - 1) between the
                  ## last observed cumulative and the corresponding sum of
                  ## predicted incrementals, per posterior draw.  The
                  ## single-column case is handled separately because
                  ## subsetting with one column drops a dimension.
                  if(length(tmp) == 1 && tmp[1] == 1)
                      diff <- cumulatives[i,last.obs.cumulative.column] /apply(inc.pred.coda[i,1,,], c(1,2), sum) - 1
                  else
                      diff <- cumulatives[i,last.obs.cumulative.column] / apply(inc.pred.coda[i,1:last.obs.cumulative.column,,], c(2,3), sum) - 1
                  ## quantile() names match the rownames ('10%', ..., '90%').
                  stats <- quantile(diff, c(.1, .25, .5, .75, .9))
                  cumulative.resi.stats[names(stats),i] <- stats
              }
          }
          if(plot)
          {
              expYearRange.seq <- seq(expYearRange[1], expYearRange[2])
              ## Empty frame with a small horizontal margin on each side.
              plot(
                   x=range(exp.years) + c(-1, +1),
                   y=range(as.vector(cumulative.resi.stats[,as.character(expYearRange.seq) ]), na.rm=TRUE),
                   type='n',
                   xlab=getExposureYearLabel(object@input),
                   ylab="Relative Difference Between Actual and Estimated Cumulatives",
                   cex.axis=1.25,
                   cex.lab=1.25)
              abline(h=0,col='gray23',lwd=2,lty='dashed')
              ## Hand-drawn box per year: wide boxes for the interquartile
              ## halves (offset .45), narrower boxes for the 10-25% and
              ## 75-90% bands (offset .25).
              for(i in seq_along(expYearRange.seq))
              {
                  year.i <- expYearRange.seq[i]
                  i. <- match(year.i, object@input@exposureYears)
                  ##draw median to make it thick
                  off.set <- .45
                  lines(x=c(year.i-off.set, year.i+off.set),
                        y=rep(cumulative.resi.stats['50%',i.],2),
                        lwd=2)
                  ##upper 25%
                  off.set <- .45
                  upper.lower <- c('75%','50%')
                  lines(x=c(year.i-off.set, year.i+off.set, year.i+off.set, year.i-off.set, year.i-off.set),
                        y=cumulative.resi.stats[upper.lower[c(1,1,2,2,1)],i.])
                  ##lower 25%
                  off.set <- .45
                  upper.lower <- c('50%','25%')
                  lines(x=c(year.i-off.set, year.i+off.set, year.i+off.set, year.i-off.set, year.i-off.set),
                        y=cumulative.resi.stats[upper.lower[c(1,1,2,2,1)],i.])
                  ##lower%
                  off.set <- .25
                  upper.lower <- c('25%','10%')
                  lines(x=c(year.i-off.set, year.i+off.set, year.i+off.set, year.i-off.set, year.i-off.set),
                        y=cumulative.resi.stats[upper.lower[c(1,1,2,2,1)],i.])
                  ##upper%
                  off.set <- .25
                  upper.lower <- c('90%','75%')
                  lines(x=c(year.i-off.set, year.i+off.set, year.i+off.set, year.i-off.set, year.i-off.set),
                        y=cumulative.resi.stats[upper.lower[c(1,1,2,2,1)],i.])
              }
          }
          return(invisible(cumulative.resi.stats))
      }
      )
##' A generic function to plot and/or return residuals for models in the \pkg{BALD} package. See \code{vignette('BALD')}.
##'
##' @name triResi
##' @param object The object from which to plot and/or return the residuals.
##' @param standardize A logical value. If \code{TRUE}, the plotted and returned residuals are normalized to their respective standard deviation.
##' @param timeAxis A character value describing along which of the three time axes to plot the residuals: \sQuote{dy} for development year time, \sQuote{cy} for calendar year time, \sQuote{ey} for exposure year time.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=triResi,AnnualAggLossDevModelOutput-method]{triResi("AnnualAggLossDevModelOutput")}}
##' \code{\link{QQPlot}}
##' @exportMethod triResi
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' #residual plot by development year
##' triResi(standard.model.output, timeAxis='dy')
##' #residual plot by exposure year
##' triResi(standard.model.output, timeAxis='ey')
##' #residual plot by calendar year
##' triResi(standard.model.output, timeAxis='cy')
##' }
## Register the 'triResi' generic via the package's verified registration
## helper.  The default 'timeAxis' is the full choice vector, resolved by
## match.arg() in the method.
setGenericVerif(
    'triResi',
    function(object, timeAxis=c('dy', 'cy', 'ey'), standardize=TRUE, plot=TRUE) {
        standardGeneric('triResi')
    })
##' A method to plot and/or return residuals for models in the \pkg{BALD} package.
##'
##' Because the model is Bayesian, each residual comes as a distribution. To ease graphical interpretation, only the median for each residual is plotted/returned.
##' The residual is defined as the observed value minus the posterior mean; if standardized, it is also divided by its posterior standard deviation.
##'
##' @name triResi,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the residuals.
##' @param timeAxis A character value describing along which of the three (3) time axes to plot the residuals. \sQuote{dy} for development year time, \sQuote{cy} for calendar year time, \sQuote{ey} for exposure year time.
##' @param standardize A logical value. If \code{TRUE}, the plotted and returned residuals are normalized to their respective standard deviation.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with the same structure as the input triangle. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{triResi}}
##' \code{\link{QQPlot}}
setMethod('triResi',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, timeAxis, standardize, plot)
      {
          timeAxis <- match.arg(timeAxis)
          K <- getTriDim(object@input)[1]
          ## Observed log incrementals; non-positive payments cannot be
          ## logged and are treated as missing.
          log.inc <- object@input@incrementals
          log.inc[log.inc <= 0] <- NA
          log.inc <- log(log.inc)
          ## Posterior draws: mu = location, beta = skewness parameter,
          ## v = degrees of freedom, h = scale.
          mu <- slot(object@mu.upper.left, 'value')
          beta <- slot(object@beta, 'value')[1,,]
          v <- slot(object@df, 'value')[1,,]
          h <- slot(object@h, 'value')
          ## K x K residual matrix, rows named by exposure year.
          resi <- array(NA, c(K, K), list(object@input@exposureYears, NULL))
          ## v/(v-2) is the variance of a standard t with v df.
          v.factor <- v / (v - 2)
          h.squared <- h ^ 2
          if(standardize)
          {
              h.to.the.forth <- h.squared ^ 2
              ## Extra variance contribution from the skewness term
              ## (as coded; requires v > 4 for this term to be finite).
              var.second.factor <- 2 * beta ^ 2 *v.factor ^ 2 / (v - 4)
              for(i in 1:K)
                  for(j in 1:K)
                  {
                      if(is.na(log.inc[i,j]))
                          next
                      ## Median over draws of (observed - mean) / sd, where
                      ## the mean includes the skew-induced shift
                      ## beta * h^2 * v/(v-2).
                      resi[i,j] <- median((log.inc[i,j] -
                                           (mu[i,j,,] + beta * h.squared[j,,] * v.factor)) /
                                          sqrt(h.squared[j,,] * v.factor + h.to.the.forth[j,,] * var.second.factor))
                  }
          } else {
              for(i in 1:K)
                  for(j in 1:K)
                  {
                      if(is.na(log.inc[i,j]))
                          next
                      ## Unstandardized: median over draws of observed - mean.
                      resi[i,j] <- median(log.inc[i,j] -
                                          (mu[i,j,,] + beta * h.squared[j,,] * v.factor))
                  }
          }
          if(identical(timeAxis, 'dy'))
          {
              ## Residuals plotted against development year (column index).
              f.plot <- function()
              {
                  plot(x=c(1,K),
                       y=range(resi, na.rm=TRUE),
                       ylab=ifelse(standardize, 'Standardized Residuals', 'Residuals'),
                       xlab='Development Year',
                       type="n",
                       cex.axis=1.25,
                       cex.lab=1.25)
                  abline(a=0,b=0,lwd=2,col='black',lty='dashed')
                  for(i in 1:K)
                      points(x=rep(i,K),
                             y=resi[,i])
                  points(x=1:K, y=apply(resi, 2, median, na.rm=TRUE), lwd=3, type="h", col='red') #bars
                  points(x=1:K, y=apply(resi, 2, median, na.rm=TRUE), pch=20, type="p", col='red') #pinheads
              }
          } else if(identical(timeAxis, 'ey')) {
              ## Residuals plotted against exposure year (row index).
              exp.years <- object@input@exposureYears
              f.plot <- function()
              {
                  plot(x=range(exp.years),
                       y=range(resi, na.rm=TRUE),
                       ylab=ifelse(standardize, 'Standardized Residuals', 'Residuals'),
                       xlab=getExposureYearLabel(object@input),
                       type="n",
                       cex.axis=1.25,
                       cex.lab=1.25)
                  abline(a=0,b=0,lwd=2,col='black',lty='dashed')
                  for(i in 1:K)
                      points(x=rep(exp.years[i],K),
                             y=resi[i,])
                  points(x=exp.years, y=apply(resi, 1, median, na.rm=TRUE), lwd=3, type="h", col='red') #bars
                  points(x=exp.years, y=apply(resi, 1, median, na.rm=TRUE), pch=20, type="p", col='red') #pinheads
              }
          } else if(identical(timeAxis, 'cy')) {
              ## Residuals plotted against calendar year: diagonal k of the
              ## triangle (cells with i + j - 1 == k) is labeled with the
              ## k-th exposure year -- assumes rows are accident years so the
              ## k-th diagonal falls in that calendar year (as coded).
              cal.years <- object@input@exposureYears
              f.plot <- function()
              {
                  plot(x=range(cal.years),
                       y=range(resi, na.rm=TRUE),
                       ylab=ifelse(standardize, 'Standardized Residuals', 'Residuals'),
                       xlab='Calendar Year',
                       type="n",
                       cex.axis=1.25,
                       cex.lab=1.25)
                  abline(a=0,b=0,lwd=2,col='black',lty='dashed')
                  ## Row/column index vectors for selecting diagonals.
                  i <- rep(1:K, K)
                  j <- rep(1:K, rep(K, K))
                  for(k in 1:K)
                  {
                      sub <- resi[i+j-1 == k]
                      l <- length(sub)
                      points(x=rep(cal.years[k],l),
                             y=sub)
                      points(x=cal.years[k],
                             y=median(sub, na.rm=TRUE),
                             lwd=3,
                             type='h',
                             col='red')
                      points(x=cal.years[k],
                             y=median(sub, na.rm=TRUE),
                             lwd=3,
                             type='p',
                             col='red')
                  }
              }
          }
          f.legend <- function()
          {
              legend('center',
                     'Median of Residuals',
                     col=c('red'),
                     pch=c(20),
                     horiz=TRUE,
                     bty='n',
                     xpd=NA)
          }
          ## Shared layout helper: plot on top, legend strip below.
          if(plot)
              plot.top.bottom(f.plot, f.legend)
          return(invisible(resi))
      })
##' A generic function to plot a Q-Q plot for models in the \pkg{BALD} package.
##'
##' This function plots sorted observed log incremental payments vs sorted predicted log incremental payments.
##' Credible intervals are also plotted. See \code{vignette('BALD')}.
##'
##' @name QQPlot
##' @param object The object from which to plot the values.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=QQPlot,AnnualAggLossDevModelOutput-method]{QQPlot("AnnualAggLossDevModelOutput")}}
##' \code{\link{triResi}}
##' @exportMethod QQPlot
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' QQPlot(standard.model.output)
##' }
## Register the 'QQPlot' generic via the package's verified registration
## helper.
setGenericVerif(
    'QQPlot',
    function(object) {
        standardGeneric('QQPlot')
    })
##' A method to plot a Q-Q plot for models in the \pkg{BALD} package.
##'
##' This function plots sorted observed log incremental payments vs sorted predicted log incremental payments.
##' Credible intervals are also plotted.
##'
##' @name QQPlot,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot the values.
##' @return NULL. Called for the side effect of plotting.
##' @docType methods
##' @seealso \code{\link{QQPlot}}
##' \code{\link{triResi}}
setMethod('QQPlot',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object)
      {
          ## Q-Q plot of observed vs predicted incrementals: for every
          ## posterior draw the predicted incrementals (at observed cells)
          ## are sorted, then quantiles are taken across draws of the sorted
          ## values, giving a median line and a 90% credible band.
          f.plot <- function()
          {
              K <- getTriDim(object@input)[1]
              inc.obs <- object@input@incrementals
              ## Draws of predicted incrementals over the observed triangle.
              inc.pred <- slot(object@inc.pred, 'value')[1:K, 1:K, , ]
              ## Cells with non-positive payments cannot be logged and are
              ## excluded from the comparison.
              log.inc.obs <- inc.obs
              log.inc.obs[log.inc.obs <= 0] <- NA
              log.inc.obs <- log(log.inc.obs)
              log.inc.obs.not.na <- !is.na(log.inc.obs)
              ## Sorted observed incrementals (raw scale; axes are log-log).
              obs.s <- sort(as.vector(inc.obs[log.inc.obs.not.na]))
              ## Sort within each draw, then take 5/50/95% across draws.
              pred.s <- apply(inc.pred, c(3,4), function(x) sort(as.vector(x[log.inc.obs.not.na])))
              pred.s.q <- apply(pred.s, 1, quantile, c(0.05, 0.5, 0.95))
              plot(x=range(obs.s),
                   xlab='Sorted Observed Incrementals (Log Scale)',
                   y=range(pred.s.q),
                   ylab='Sorted Predicted Incrementals (Log Scale)',
                   type='n',
                   cex.axis=1.25,
                   cex.lab=1.25,
                   log='xy')
              ## Lower credible bound.
              lines(x=obs.s,
                    y=pred.s.q[1,])
              ## Median as open circles.
              points(x=obs.s,
                     y=pred.s.q[2,],
                     cex=1.3)
              ## Upper credible bound.
              lines(x=obs.s,
                    y=pred.s.q[3,])
              ## 45-degree reference: perfect agreement.
              abline(a=0,b=1,col='red',lty=2)
          }
          f.legend <- function()
          {
              legend('center',
                     c('Median', '90 Percent\nCredible Intervals', '45 Degree Line'),
                     col=c('black', 'black', 'red'),
                     lty=c(NA, 1, 2),
                     pch=c(1, NA, NA),
                     horiz=TRUE,
                     bty='n',
                     xpd=NA)
          }
          ## Shared layout helper: plot on top, legend strip below.
          plot.top.bottom(f.plot, f.legend)
      })
##' A generic function to plot and/or return the posterior of the skewness parameter for models in \pkg{BALD}.
##'
##' The skewness parameter does not directly correspond to the degree of skewness. However, all else being equal, a larger (in magnitude) skewness parameter indicates a higher degree of skewness,
##' and a skewness parameter of zero equates to zero skew.
##' See \code{vignette('BALD')}.
##'
##' @references
##' Kim, Y., and J. McCulloch (2007) \dQuote{The Skew-Student Distribution with Application to U.S. Stock Market Returns and the Equity Premium,} Department of Economics, Ohio State University, October 2007
##'
##' @name skewnessParameter
##' @param object The object from which to plot and/or return the skewness parameter.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=skewnessParameter,AnnualAggLossDevModelOutput-method]{skewnessParameter("AnnualAggLossDevModelOutput")}}
##' @exportMethod skewnessParameter
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' skewnessParameter(standard.model.output)
##' }
## Register the 'skewnessParameter' generic via the package's verified
## registration helper.
setGenericVerif(
    'skewnessParameter',
    function(object, plotDensity=TRUE, plotTrace=TRUE) {
        standardGeneric('skewnessParameter')
    })
##' A method to plot and/or return the posterior of the skewness parameter for models in \pkg{BALD}.
##'
##' The skewness parameter does not directly correspond to the degree of skewness. However, all else being equal, a larger (in magnitude) skewness parameter indicates a higher degree of skewness,
##' and a skewness parameter of zero equates to zero skew.
##'
##' @references
##' Kim, Y., and J. McCulloch (2007) \dQuote{The Skew-Student Distribution with Application to U.S. Stock Market Returns and the Equity Premium,} Department of Economics, Ohio State University, October 2007
##'
##' @name skewnessParameter,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the skewness parameter.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. But also returns a named array with some select quantiles of the posterior for the skewness parameter. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{skewnessParameter}}
##' @importFrom stats integrate
setMethod('skewnessParameter',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## The skewness parameter only exists when the model was estimated
          ## with the skewed t; otherwise warn and return NULL invisibly.
          if(!object@input@allowForSkew)
          {
              warning('Cannot call "skewnessParameter" unless the model was estimated with a skewed-t. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }
          ## Prior hyperparameters are taken from the JAGS data list.
          jd <- getJagsData(object@input)
          precision.for.skewness <- jd$precision.for.skewness
          df.for.skewness <- jd$df.for.skewness
          mu <- 0  # the prior is centered at zero
          ## Unnormalized prior density: a Student-t with df.for.skewness
          ## degrees of freedom and precision precision.for.skewness,
          ## centered at mu.
          d.un <- function(x)
          {
              gamma((df.for.skewness+1)/2) / gamma(df.for.skewness / 2) * (precision.for.skewness/ df.for.skewness / pi) ^ 0.5 * (1 + precision.for.skewness / df.for.skewness * (x - mu)^2) ^ (-(df.for.skewness + 1) / 2)
          }
          ## Prior mass below the lower/upper truncation bounds; (u - l) is
          ## the normalizing constant of the truncated prior.
          l <- integrate (d.un, lower = -Inf, upper = jd$bounds.for.skewness[1])$value
          u <- integrate (d.un, lower = -Inf, upper = jd$bounds.for.skewness[2])$value
          ## Normalized prior density, zero outside the truncation bounds.
          d <- function(x)
          {
              if(x < jd$bounds.for.skewness[1] || x > jd$bounds.for.skewness[2])
                  return(0)
              return(d.un(x) / (u - l))
          }
          ## Delegate density/trace plotting and quantile summary to the
          ## shared helper; the first element of beta holds the skewness
          ## parameter draws.
          ans <- plot.density.and.or.trace(coda=slot(object@beta, 'value')[1,,],
                                           plotDensity = plotDensity ,
                                           plotTrace = plotTrace,
                                           d.prior=d,
                                           nice.parameter.name='Skewness Parameter',
                                           zero.line=TRUE,
                                           lower.bound=jd$bounds.for.skewness[1],
                                           upper.bound=jd$bounds.for.skewness[2])
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior of the autoregressive parameter for models in \pkg{BALD}.
##' See \code{vignette('BALD')}.
##'
##'
##' @name autoregressiveParameter
##' @param object The object from which to plot and/or return the autoregressive parameter.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=autoregressiveParameter,AnnualAggLossDevModelOutput-method]{autoregressiveParameter("AnnualAggLossDevModelOutput")}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
##' @exportMethod autoregressiveParameter
##' @examples
##' rm(list=ls())
##' library(BALD)
##' data(CumulativeAutoBodilyInjuryTriangle)
##' CumulativeAutoBodilyInjuryTriangle <- as.matrix(CumulativeAutoBodilyInjuryTriangle)
##' sample.col <- (dim(CumulativeAutoBodilyInjuryTriangle)[2] - 6:0)
##' data(HPCE)
##' HPCE <- as.matrix(HPCE)[,1]
##' HPCE.rate <- HPCE[-1] / HPCE[-length(HPCE)] - 1
##' print(HPCE.rate[(-10):0 + length(HPCE.rate)])
##' HPCE.years <- as.integer(names(HPCE.rate))
##' max.exp.year <- max(as.integer(
##' dimnames(CumulativeAutoBodilyInjuryTriangle)[[1]]))
##' years.to.keep <- HPCE.years <= max.exp.year + 3
##' HPCE.rate <- HPCE.rate[years.to.keep]
##' break.model.input.w.ar1 <- makeBreakAnnualInput(
##' cumulative.payments = CumulativeAutoBodilyInjuryTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = HPCE.rate,
##' first.year.in.new.regime = c(1986, 1987),
##' prior.for.first.year.in.new.regime=c(2,1),
##' exp.year.type = 'ay',
##' extra.dev.years = 5,
##' use.skew.t = TRUE,
##' bound.for.skewness.parameter=5,
##' use.ar1.in.calendar.year = TRUE)
##' \dontrun{
##' break.model.output.w.ar1 <- runLossDevModel(
##' break.model.input.w.ar1,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' calendarYearEffectAutoregressiveParameter(break.model.output.w.ar1)
##' }
## Register the deprecated 'autoregressiveParameter' generic.  The
## deprecation warning fires before dispatch so every call is flagged,
## regardless of the method invoked.
setGenericVerif(
    'autoregressiveParameter',
    function(object, plotDensity=TRUE, plotTrace=TRUE) {
        .Deprecated('calendarYearEffectAutoregressiveParameter')
        standardGeneric('autoregressiveParameter')
    })
##' A method to plot and/or return the posterior of the autoregressive parameter for models in \pkg{BALD}.
##'
##' @name autoregressiveParameter,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the autoregressive parameter.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the autoregressive parameter. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{autoregressiveParameter}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
setMethod('autoregressiveParameter',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace) {
              ## Deprecated entry point: forward all arguments unchanged to
              ## the renamed generic.
              calendarYearEffectAutoregressiveParameter(object, plotDensity, plotTrace)
          })
##' A generic function to plot and/or return the posterior of the autoregressive parameter for the calendar year effect for models in \pkg{BALD}.
##' See \code{vignette('BALD')}.
##'
##'
##' @name calendarYearEffectAutoregressiveParameter
##' @param object The object from which to plot and/or return the autoregressive parameter which is associated with the calendar year effect.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=calendarYearEffectAutoregressiveParameter,AnnualAggLossDevModelOutput-method]{calendarYearEffectAutoregressiveParameter("AnnualAggLossDevModelOutput")}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
##' @exportMethod calendarYearEffectAutoregressiveParameter
##' @examples
##' rm(list=ls())
##' library(BALD)
##' data(CumulativeAutoBodilyInjuryTriangle)
##' CumulativeAutoBodilyInjuryTriangle <- as.matrix(CumulativeAutoBodilyInjuryTriangle)
##' sample.col <- (dim(CumulativeAutoBodilyInjuryTriangle)[2] - 6:0)
##' print(decumulate(CumulativeAutoBodilyInjuryTriangle)[1:7, sample.col])
##' data(HPCE)
##' HPCE <- as.matrix(HPCE)[,1]
##' HPCE.rate <- HPCE[-1] / HPCE[-length(HPCE)] - 1
##' print(HPCE.rate[(-10):0 + length(HPCE.rate)])
##' HPCE.years <- as.integer(names(HPCE.rate))
##' max.exp.year <- max(as.integer(dimnames(CumulativeAutoBodilyInjuryTriangle)[[1]]))
##' years.to.keep <- HPCE.years <= max.exp.year + 3
##' HPCE.rate <- HPCE.rate[years.to.keep]
##' break.model.input.w.ar1 <- makeBreakAnnualInput(
##' cumulative.payments = CumulativeAutoBodilyInjuryTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = HPCE.rate,
##' first.year.in.new.regime = c(1986, 1987),
##' prior.for.first.year.in.new.regime=c(2,1),
##' exp.year.type = 'ay',
##' extra.dev.years = 5,
##' use.skew.t = TRUE,
##' bound.for.skewness.parameter=5,
##' use.ar1.in.calendar.year = TRUE)
##' \dontrun{
##' break.model.output.w.ar1 <- runLossDevModel(
##' break.model.input.w.ar1,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' calendarYearEffectAutoregressiveParameter(break.model.output.w.ar1)
##' }
## Generic for the posterior of the calendar-year-effect AR parameter.
## Methods are dispatched on 'object'; both plot switches default to TRUE.
setGenericVerif('calendarYearEffectAutoregressiveParameter',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
                    standardGeneric('calendarYearEffectAutoregressiveParameter'))
##' A method to plot and/or return the posterior of the autoregressive parameter for the calendar year effect for models in \pkg{BALD}.
##'
##' @name calendarYearEffectAutoregressiveParameter,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the autoregressive parameter which is associated with the calendar year effect.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the autoregressive parameter. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{calendarYearEffectAutoregressiveParameter}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
setMethod('calendarYearEffectAutoregressiveParameter',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## The AR parameter only exists if the model was estimated with an
          ## AR(1) term in the calendar year effect; otherwise warn and
          ## return NULL invisibly rather than erroring.
          if(!object@input@ar1InCalendarYearEffect)
          {
              ## Fix: message read "a autoregressive"; corrected to "an".
              warning('Cannot call "calendarYearEffectAutoregressiveParameter" unless the model was estimated with an autoregressive error term in the calendar year effect. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }
          ## jags data carries the beta prior hyperparameters for rho.
          jd <- getJagsData(object@input)
          ## Plot/summarize the posterior draws of rho; the prior is a
          ## beta density, so the support is [0, 1].
          ans <- plot.density.and.or.trace(coda=slot(object@rho, 'value')[1,,],
                                           plotDensity = plotDensity ,
                                           plotTrace = plotTrace,
                                           d.prior=function(x) dbeta(x, jd$rho.prior[1], jd$rho.prior[2]),
                                           nice.parameter.name='Calendar Year AR Parameter',
                                           zero.line=FALSE,
                                           lower.bound=0,
                                           upper.bound=1)
          ## Return the quantile summary invisibly (plotting is the main effect).
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior of the autoregressive parameter for the exposure growth for models in \pkg{BALD}.
##' See \code{vignette('BALD')}.
##'
##'
##' @name exposureGrowthAutoregressiveParameter
##' @param object The object from which to plot and/or return the autoregressive parameter which is associated with exposure growth.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=exposureGrowthAutoregressiveParameter,AnnualAggLossDevModelOutput-method]{exposureGrowthAutoregressiveParameter("AnnualAggLossDevModelOutput")}}
##' @exportMethod exposureGrowthAutoregressiveParameter
##' @examples
##' rm(list=ls())
##' library(BALD)
##' data(CumulativeAutoBodilyInjuryTriangle)
##' CumulativeAutoBodilyInjuryTriangle <- as.matrix(CumulativeAutoBodilyInjuryTriangle)
##' sample.col <- (dim(CumulativeAutoBodilyInjuryTriangle)[2] - 6:0)
##' print(decumulate(CumulativeAutoBodilyInjuryTriangle)[1:7, sample.col])
##' data(HPCE)
##' HPCE <- as.matrix(HPCE)[,1]
##' HPCE.rate <- HPCE[-1] / HPCE[-length(HPCE)] - 1
##' print(HPCE.rate[(-10):0 + length(HPCE.rate)])
##' HPCE.years <- as.integer(names(HPCE.rate))
##' max.exp.year <- max(as.integer(dimnames(CumulativeAutoBodilyInjuryTriangle)[[1]]))
##' years.to.keep <- HPCE.years <= max.exp.year + 3
##' HPCE.rate <- HPCE.rate[years.to.keep]
##' break.model.input.w.ar1 <- makeBreakAnnualInput(
##' cumulative.payments = CumulativeAutoBodilyInjuryTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = HPCE.rate,
##' first.year.in.new.regime = c(1986, 1987),
##' prior.for.first.year.in.new.regime=c(2,1),
##' exp.year.type = 'ay',
##' extra.dev.years = 5,
##' use.skew.t = TRUE,
##' bound.for.skewness.parameter=5,
##' use.ar1.in.exposure.growth = TRUE)
##' \dontrun{
##' break.model.output.w.ar1 <- runLossDevModel(
##' break.model.input.w.ar1,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' exposureGrowthAutoregressiveParameter(break.model.output.w.ar1)
##' }
## Generic for the posterior of the exposure-growth AR parameter.
## Methods are dispatched on 'object'; both plot switches default to TRUE.
setGenericVerif('exposureGrowthAutoregressiveParameter',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
                    standardGeneric('exposureGrowthAutoregressiveParameter'))
##' A method to plot and/or return the posterior of the autoregressive parameter for the exposure growth for models in \pkg{BALD}.
##'
##' @name exposureGrowthAutoregressiveParameter,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the autoregressive parameter which is associated with exposure growth.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the autoregressive parameter. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{exposureGrowthAutoregressiveParameter}}
setMethod('exposureGrowthAutoregressiveParameter',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## The AR parameter only exists if the model was estimated with an
          ## AR(1) term in the exposure growth; otherwise warn and return
          ## NULL invisibly rather than erroring.
          if(!object@input@ar1InExposureGrowth)
          {
              ## Fix: the warning was copy-pasted from the calendar-year-effect
              ## method and wrongly referred to "the calendar year effect";
              ## this method guards on ar1InExposureGrowth, so the message
              ## now names the exposure growth (and "a" -> "an").
              warning('Cannot call "exposureGrowthAutoregressiveParameter" unless the model was estimated with an autoregressive error term in the exposure growth. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }
          ## jags data carries the beta prior hyperparameters for rho.eta.
          jd <- getJagsData(object@input)
          ## Plot/summarize the posterior draws of rho.eta; the prior is a
          ## beta density, so the support is [0, 1].
          ans <- plot.density.and.or.trace(coda=slot(object@rho.eta, 'value')[1,,],
                                           plotDensity = plotDensity ,
                                           plotTrace = plotTrace,
                                           d.prior=function(x) dbeta(x, jd$rho.eta.prior[1], jd$rho.eta.prior[2]),
                                           nice.parameter.name='Exposure Growth AR Parameter',
                                           zero.line=FALSE,
                                           lower.bound=0,
                                           upper.bound=1)
          ## Return the quantile summary invisibly (plotting is the main effect).
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior of the mean exposure growth for models in \pkg{BALD}.
##'
##' (Optionally) exposure growth is modeled as an ar1 process. This inherently assumes that periods of high exposure growth are (or at least have the possibility of being) followed by continued high periods.
##' See \code{vignette('BALD')}.
##'
##' @name meanExposureGrowth
##' @param object The object from which to plot and/or return the mean exposure growth.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=meanExposureGrowth,AnnualAggLossDevModelOutput-method]{meanExposureGrowth("AnnualAggLossDevModelOutput")}}
##' @exportMethod meanExposureGrowth
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' meanExposureGrowth(standard.model.output)
##' }
## Generic for the posterior of the mean exposure growth rate.
## Methods are dispatched on 'object'; both plot switches default to TRUE.
setGenericVerif('meanExposureGrowth',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
                {
                    standardGeneric('meanExposureGrowth')
                })
##' A method to plot and/or return the posterior of the mean exposure growth for models in \pkg{BALD}.
##'
##'
##' @name meanExposureGrowth,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the mean exposure growth.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the mean exposure growth. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{meanExposureGrowth}}
setMethod('meanExposureGrowth',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## Consistency: fetch the jags data once into "jd", matching the
          ## other posterior-plotting methods in this file (the original
          ## inlined getJagsData() inside the prior closure, re-reading it
          ## on every density evaluation).
          jd <- getJagsData(object@input)
          ## The prior for eta.mu is a zero-mean normal whose precision is
          ## supplied in the jags data; convert precision to a standard
          ## deviation for dnorm().
          ans <- plot.density.and.or.trace(coda=slot(object@eta.mu, 'value')[1,,],
                                           plotDensity = plotDensity ,
                                           plotTrace = plotTrace,
                                           d.prior=function(x) dnorm(x, 0, sqrt(1/jd$precision.for.eta.mu)),
                                           nice.parameter.name='Mean Exposure Growth',
                                           zero.line=TRUE)
          ## Return the quantile summary invisibly (plotting is the main effect).
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior of the degrees of freedom for the Student-\eqn{t} in models in \pkg{BALD}.
##'
##' When there is zero skew, the degrees of freedom are the degrees of freedom for the non-skewed \eqn{t}.
##' See \code{vignette('BALD')}.
##'
##' @references
##' Kim, Y., and J. McCulloch (2007) \dQuote{The Skew-Student Distribution with Application to U.S. Stock Market Returns and the Equity Premium,} Department of Economics, Ohio State University, October 2007.
##'
##' @name degreesOfFreedom
##' @param object The object from which to plot and/or return the degrees of freedom.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=degreesOfFreedom,AnnualAggLossDevModelOutput-method]{degreesOfFreedom("AnnualAggLossDevModelOutput")}}
##' @exportMethod degreesOfFreedom
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' degreesOfFreedom(standard.model.output)
##' }
## Generic for the posterior of the Student-t degrees of freedom.
## Methods are dispatched on 'object'; both plot switches default to TRUE.
setGenericVerif('degreesOfFreedom',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
                {
                    standardGeneric('degreesOfFreedom')
                })
##' A method to plot and/or return the posterior of the degrees of freedom for the Student-\eqn{t} in models in \pkg{BALD}.
##'
##'
##' @name degreesOfFreedom,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the degrees of freedom.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with some select quantiles of the posterior for the degrees of freedom. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{degreesOfFreedom}}
setMethod('degreesOfFreedom',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## Prior hyperparameters for the degrees of freedom, as sent to jags.
          jags.data <- getJagsData(object@input)
          ## The prior is a chi-square truncated to [df.bounds[1], df.bounds[2]];
          ## precompute the truncated probability mass once so the closure
          ## only divides by a constant.
          trunc.mass <- pchisq(jags.data$df.bounds[2], jags.data$df.k) -
              pchisq(jags.data$df.bounds[1], jags.data$df.k)
          df.summary <- plot.density.and.or.trace(coda=slot(object@df, 'value')[1,,],
                                                  plotDensity = plotDensity,
                                                  plotTrace = plotTrace,
                                                  d.prior=function(x) dchisq(x, df=jags.data$df.k) / trunc.mass,
                                                  nice.parameter.name='Degrees of Freedom',
                                                  zero.line=FALSE,
                                                  lower.bound=jags.data$df.bounds[1],
                                                  upper.bound=jags.data$df.bounds[2])
          ## Quantile summary is returned invisibly; plotting is the main effect.
          invisible(df.summary)
      })
##' A generic function to plot and/or return the posterior of the standard deviation of the exposure growth rate for models in \pkg{BALD}.
##' See \code{vignette('BALD')}.
##'
##' @name standardDeviationOfExposureGrowth
##' @param object The object from which to plot and/or return the standard deviation of the exposure growth rate.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=standardDeviationOfExposureGrowth,AnnualAggLossDevModelOutput-method]{standardDeviationOfExposureGrowth("AnnualAggLossDevModelOutput")}}
##' @exportMethod standardDeviationOfExposureGrowth
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' standardDeviationOfExposureGrowth(standard.model.output)
##' }
## Generic for the posterior of the exposure growth standard deviation.
## Methods are dispatched on 'object'; both plot switches default to TRUE.
setGenericVerif('standardDeviationOfExposureGrowth',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
                {
                    standardGeneric('standardDeviationOfExposureGrowth')
                })
##' A method to plot and/or return the posterior of the standard deviation of the exposure growth rate for models in \pkg{BALD}.
##'
##' @name standardDeviationOfExposureGrowth,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the standard deviation of the exposure growth rate.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the standard deviation of exposure growth. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{standardDeviationOfExposureGrowth}}
##' \code{\link{exposureGrowth}}
##' \code{\link{meanExposureGrowth}}
setMethod('standardDeviationOfExposureGrowth',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## Bounds of the uniform prior on sigma.eta, as supplied to jags.
          eta.bounds <- getJagsData(object@input)$sigma.eta.bounds
          ## Plot/summarize the posterior draws of sigma.eta against its
          ## uniform prior; the support starts at the prior's lower bound.
          posterior.summary <- plot.density.and.or.trace(
              coda=slot(object@sigma.eta, 'value')[1,,],
              plotDensity = plotDensity,
              plotTrace = plotTrace,
              d.prior=function(x) dunif(x, eta.bounds[1], eta.bounds[2]),
              nice.parameter.name='Exposure Growth Standard Deviation',
              zero.line=FALSE,
              lower.bound=eta.bounds[1])
          ## Quantile summary is returned invisibly; plotting is the main effect.
          invisible(posterior.summary)
      })
##' A generic function to plot and/or return the posterior of the standard deviation of the calendar year effect for models in \pkg{BALD}.
##' See \code{vignette('BALD')}.
##'
##' @name standardDeviationOfCalendarYearEffect
##' @param object The object from which to plot and/or return the standard deviation of the calendar year effect.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @exportMethod standardDeviationOfCalendarYearEffect
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' standardDeviationOfCalendarYearEffect(standard.model.output)
##' }
##'
##' @seealso \code{\link[=standardDeviationOfCalendarYearEffect,AnnualAggLossDevModelOutput-method]{standardDeviationOfCalendarYearEffect("AnnualAggLossDevModelOutput")}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
##' \code{\link{autoregressiveParameter}}
## Generic for the posterior of the calendar year effect standard deviation.
## Methods are dispatched on 'object'; both plot switches default to TRUE.
setGenericVerif('standardDeviationOfCalendarYearEffect',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
                {
                    standardGeneric('standardDeviationOfCalendarYearEffect')
                })
##' A method to plot and/or return the posterior of the standard deviation of the calendar year effect for models in \pkg{BALD}.
##'
##' @name standardDeviationOfCalendarYearEffect,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the standard deviation of the calendar year effect.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the standard deviation of the calendar year effect. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
##' \code{\link{autoregressiveParameter}}
setMethod('standardDeviationOfCalendarYearEffect',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## Bounds of the uniform prior on sigma.kappa, as supplied to jags.
          kappa.bounds <- getJagsData(object@input)$sigma.kappa.bounds
          ## Plot/summarize the posterior draws of sigma.kappa against its
          ## uniform prior; the support starts at the prior's lower bound.
          posterior.summary <- plot.density.and.or.trace(
              coda=slot(object@sigma.kappa, 'value')[1,,],
              plotDensity = plotDensity,
              plotTrace = plotTrace,
              d.prior=function(x) dunif(x, kappa.bounds[1], kappa.bounds[2]),
              nice.parameter.name='Calendar Effect Standard Deviation',
              zero.line=FALSE,
              lower.bound=kappa.bounds[1])
          ## Quantile summary is returned invisibly; plotting is the main effect.
          invisible(posterior.summary)
      })
##' A generic function to plot and/or return the posterior of the standard deviation for the innovation in the scale parameter for models in \pkg{BALD}.
##'
##' Changes in the scale parameter (see \code{\link{scaleParameter}}) are assumed to follow a second-order random walk on the log scale.
##' This function plots the posterior standard deviation for this random walk.
##' See \code{vignette('BALD')}.
##'
##' @name standardDeviationForScaleInnovation
##' @param object The object from which to plot and/or return the standard deviation for the innovation in the log of the scale parameter.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=standardDeviationForScaleInnovation,AnnualAggLossDevModelOutput-method]{standardDeviationForScaleInnovation("AnnualAggLossDevModelOutput")}}
##' \code{\link{standardDeviationVsDevelopmentTime}}
##' @exportMethod standardDeviationForScaleInnovation
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' standardDeviationForScaleInnovation(standard.model.output)
##' }
## Generic for the posterior sd of the innovation in the (log) scale parameter.
## Methods are dispatched on 'object'; both plot switches default to TRUE.
setGenericVerif('standardDeviationForScaleInnovation',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
                {
                    standardGeneric('standardDeviationForScaleInnovation')
                })
##' A method to plot and/or return the posterior of the standard deviation for the innovation in the scale parameter for models in \pkg{BALD}.
##'
##' @name standardDeviationForScaleInnovation,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the standard deviation for the innovation in the log of the scale parameter.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with some select quantiles of the posterior for the standard deviation in question. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{standardDeviationForScaleInnovation}}
##' \code{\link{scaleParameter}}
##' \code{\link{standardDeviationVsDevelopmentTime}}
setMethod('standardDeviationForScaleInnovation',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## The innovation standard deviation is only estimated when at
          ## least three columns carry their own scale parameter; otherwise
          ## warn and return NULL invisibly rather than erroring.
          if(object@input@noChangeInScaleParameterAfterColumn <= 2)
          {
              warning('Cannot call "standardDeviationForScaleInnovation" unless the model was estimated with at least three columns with different scales. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }
          ## Fix: the original assigned jd <- getJagsData(object@input) here
          ## but never used it (the plot call passes draw.prior = FALSE);
          ## the dead call has been removed.
          ans <- plot.density.and.or.trace(coda=slot(object@sigma.h.2.log.innov, 'value')[1,,],
                                           plotDensity = plotDensity,
                                           plotTrace = plotTrace,
                                           draw.prior = FALSE,
                                           nice.parameter.name='Scale Innovation Standard Deviation',
                                           zero.line=FALSE,
                                           lower.bound=0)
          ## Return the quantile summary invisibly (plotting is the main effect).
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior of the scale parameter for the Student-\eqn{t} measurement equation for models in \pkg{BALD}.
##'
##' As the degrees of freedom of the \eqn{t} goes to infinity, the scale parameter is the standard deviation of the resulting normal distribution (assuming zero skew).
##' See \code{vignette('BALD')}.
##'
##' @name scaleParameter
##' @param object The object from which to plot and/or return the scale parameter.
##' @param column The scale parameter is allowed to vary with development time. Setting \code{column} results in the plotting and returning of the scale parameter corresponding to that column. Default value is \code{1}.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=scaleParameter,AnnualAggLossDevModelOutput-method]{scaleParameter("AnnualAggLossDevModelOutput")}}
##' @exportMethod scaleParameter
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' scaleParameter(standard.model.output)
##' }
## Generic for the posterior of the Student-t scale parameter by column.
## Methods are dispatched on 'object'; 'column' selects the development
## period (default 1); both plot switches default to TRUE.
setGenericVerif('scaleParameter',
                function(object, column=1, plotDensity=TRUE, plotTrace=TRUE)
                    standardGeneric('scaleParameter'))
##' A method to plot and/or return the posterior of the scale parameter for the Student-\eqn{t} measurement equation for models in \pkg{BALD}.
##'
##' @name scaleParameter,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the scale parameter.
##' @param column The scale parameter is allowed to vary with development time. Setting \code{column} results in the plotting and returning of the scale parameter corresponding to that column. Default value is \code{1}.
##' @param plotDensity A logical value. If \code{TRUE}, then the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, then the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the posterior for the scale parameter. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{scaleParameter}}
setMethod('scaleParameter',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, column, plotDensity, plotTrace)
      {
          ## Validate 'column': it must be a single number indexing a column of the
          ## observed triangle (the scale parameter varies by development year).
          if(!is.numeric(column))
              stop('"column" must be numeric')
          if(!identical(length(column), as.integer(1)))
              stop('"column" must be of length 1')
          if(column < 1 || column > getTriDim(object@input)[1])
              stop('"column" must be greater than 0 and less than the number of columns in the supplied incremental triangle.')
          ## Plot and/or summarize the posterior of the scale parameter for the
          ## requested column.  No prior curve is drawn (draw.prior=FALSE), so the
          ## jags data is not needed here (an unused getJagsData() call was removed).
          ans <- plot.density.and.or.trace(coda=slot(object@h, 'value')[column,,],
                                           plotDensity = plotDensity ,
                                           plotTrace = plotTrace,
                                           draw.prior=FALSE,
                                           nice.parameter.name=paste('Scale Parameter:', column),
                                           zero.line=FALSE,
                                           lower.bound=0)
          ## Return select quantiles of the posterior invisibly (plotting is the
          ## primary purpose of this method).
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior of the stochastic inflation rho parameter for models in \pkg{BALD}.
##'
##' If the model incorporates a stochastic rate of inflation, then that rate is assumed to follow (on the log scale) an autoregressive process of order 1.
##' (The autoregressive process of order 1 is the discrete equivalent to an Ornstein-Uhlenbeck process.)
##' This function plots the posterior for the \eqn{rho} parameter, assuming one was estimated.
##' See \code{vignette('BALD')}.
##'
##' @name stochasticInflationRhoParameter
##' @param object The object from which to plot and/or return the stochastic inflation \eqn{rho} parameter.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=stochasticInflationRhoParameter,AnnualAggLossDevModelOutput-method]{stochasticInflationRhoParameter("AnnualAggLossDevModelOutput")}}
##' \code{\link{stochasticInflationStationaryMean}}
##' \code{\link{stochasticInflation}}
##' @exportMethod stochasticInflationRhoParameter
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' stochasticInflationRhoParameter(standard.model.output)
##' }
## Generic for 'stochasticInflationRhoParameter'; see the methods for details.
setGenericVerif('stochasticInflationRhoParameter',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
                    standardGeneric('stochasticInflationRhoParameter'))
##' A method to plot and/or return the posterior of the stochastic inflation \eqn{rho} parameter for models in \pkg{BALD}.
##'
##' @name stochasticInflationRhoParameter,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOuput} from which to plot and/or return the stochastic inflation \eqn{rho} parameter.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the \eqn{rho} parameter. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{stochasticInflationRhoParameter}}
##' \code{\link{stochasticInflationStationaryMean}}
##' \code{\link{stochasticInflation}}
setMethod('stochasticInflationRhoParameter',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## The rho parameter only exists when a stochastic inflation series was
          ## supplied and the persistence (rho) was estimated rather than fixed.
          no.stoch.rate <- identical(object@input@stochInflationRate, 0)
          rho.is.known <- !is.na(object@input@knownStochInflationPersistence)
          if(no.stoch.rate || rho.is.known)
          {
              warning('Cannot call "stochasticInflationRhoParameter" unless 1) there is a stochastic rate of inflation and 2) the rho is not known. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }
          ## The prior on rho is a beta distribution whose shape parameters are
          ## stored in the jags data for the model.
          jags.data <- getJagsData(object@input)
          prior.density <- function(x) dbeta(x, jags.data$a.ou.prior[1], jags.data$a.ou.prior[2])
          ## Plot and/or summarize the posterior; rho lives in (0, 1).
          ans <- plot.density.and.or.trace(coda=slot(object@a.ou, 'value')[1,,],
                                           plotDensity=plotDensity,
                                           plotTrace=plotTrace,
                                           d.prior=prior.density,
                                           nice.parameter.name='Inflation Autoregressive Parameter',
                                           zero.line=FALSE,
                                           lower.bound=0,
                                           upper.bound=1)
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior of the stochastic inflation stationary mean for models in \pkg{BALD}.
##'
##' If the model incorporates a stochastic rate of inflation, then that rate is assumed to follow (on the log scale) an autoregressive process of order 1.
##' (The autoregressive process of order 1 is the discrete equivalent to an Ornstein-Uhlenbeck process.)
##' This function plots the posterior for the stationary mean (on the log scale), assuming such a mean was estimated.
##' See \code{vignette('BALD')}.
##'
##' @name stochasticInflationStationaryMean
##' @param object The object from which to plot and/or return the stochastic inflation stationary mean.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=stochasticInflationStationaryMean,AnnualAggLossDevModelOutput-method]{stochasticInflationStationaryMean("AnnualAggLossDevModelOutput")}}
##' \code{\link{stochasticInflationRhoParameter}}
##' \code{\link{stochasticInflation}}
##' @exportMethod stochasticInflationStationaryMean
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' stochasticInflationStationaryMean(standard.model.output)
##' }
## Generic for 'stochasticInflationStationaryMean'; see the methods for details.
setGenericVerif('stochasticInflationStationaryMean',
                function(object, plotDensity=TRUE, plotTrace=TRUE)
                    standardGeneric('stochasticInflationStationaryMean'))
##' A method to plot and/or return the posterior of the stochastic inflation stationary mean for models in \pkg{BALD}.
##'
##' @name stochasticInflationStationaryMean,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOuput} from which to plot and/or return the stochastic inflation stationary mean.
##' @param plotDensity A logical value. If \code{TRUE}, the density is plotted. If \code{plotTrace} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @param plotTrace A logical value. If \code{TRUE}, the trace is plotted. If \code{plotDensity} is also \code{TRUE}, then two plots are generated. If they are both \code{FALSE}, then only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with select quantiles of the stochastic inflation stationary mean. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{stochasticInflationStationaryMean}}
##' \code{\link{stochasticInflationRhoParameter}}
##' \code{\link{stochasticInflation}}
setMethod('stochasticInflationStationaryMean',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plotDensity, plotTrace)
      {
          ## The stationary mean only exists when a stochastic inflation series was
          ## supplied and the mean was estimated rather than fixed by the user.
          if(identical(object@input@stochInflationRate,0) || !is.na(object@input@knownStochInflationMean))
          {
              warning('Cannot call "stochasticInflationStationaryMean" unless 1) there is a stochastic rate of inflation and 2) the mean is not known. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }
          ## For an AR(1) on the log scale, the stationary mean is
          ## intercept / (1 - slope); compute it draw by draw from the coda.
          ar.slope <- slot(object@a.ou, 'value')[1,,]
          ar.intercept <- slot(object@b.ou, 'value')[1,,]
          stationary.mean.coda <- ar.intercept / (1 - ar.slope)
          ans <- plot.density.and.or.trace(coda=stationary.mean.coda,
                                           plotDensity=plotDensity,
                                           plotTrace=plotTrace,
                                           draw.prior=FALSE,
                                           nice.parameter.name='(Log) Inflation Stationary Mean',
                                           zero.line=TRUE)
          return(invisible(ans))
      })
##' A generic function to plot and/or return predicted and forecast stochastic inflation rates for models in \pkg{BALD}.
##'
##' If the model incorporates a stochastic rate of inflation, then that rate is assumed to follow (on the log scale) an autoregressive process of order 1.
##' (The autoregressive process of order 1 is the discrete equivalent to an Ornstein-Uhlenbeck process.)
##' This function plots the median of the posterior predictive distribution for stochastic inflation (not on the log scale) rates by year.
##' Values are returned prior to the application of any limits or weights.
##' Note that for years where observed values are supplied, the model takes those values at face value.
##' See \code{vignette('BALD')}.
##'
##' @name stochasticInflation
##' @param object The object from which to plot and/or return the stochastic inflation rates.
##' @param extraYears An integer expressing the (maximum) number of years to plot (beyond the final observed year). Must be at least zero. Default is 15.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=stochasticInflation,AnnualAggLossDevModelOutput-method]{stochasticInflation("AnnualAggLossDevModelOutput")}}
##' \code{\link{stochasticInflationRhoParameter}}
##' \code{\link{stochasticInflationStationaryMean}}
##' @exportMethod stochasticInflation
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' stochasticInflation(standard.model.output)
##' }
## Generic for 'stochasticInflation'.  Argument checks that apply to every
## method are performed here, before dispatch.
setGenericVerif('stochasticInflation',
                function(object, extraYears=15, plot=TRUE)
            {
                ## 'extraYears' must be a single non-negative number.
                if(!is.numeric(extraYears))
                    stop('"extraYears" must be numeric')
                if(length(extraYears) != 1L)
                    stop('"extraYears" must be of length 1')
                if(extraYears < 0)
                    stop('"extraYears" must be at least zero.')
                standardGeneric('stochasticInflation')
            })
##' A method to plot and/or return predicted and forecast stochastic inflation rates for models in \pkg{BALD}.
##'
##' @name stochasticInflation,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOuput} from which to plot and/or return predicted and forecast stochastic inflation rates.
##' @param extraYears An integer expressing the (maximum) number of years to plot (beyond the final observed year). Must be at least zero.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array of the median predicted inflation rate (not on the log scale). Returned invisibly.
##' @docType methods
##' @seealso \code{\link{stochasticInflation}}
##' \code{\link{stochasticInflationRhoParameter}}
##' \code{\link{stochasticInflationStationaryMean}}
## Plot and/or return predicted and forecast stochastic inflation rates.
## Returns (invisibly) the named vector of median predicted rates by year.
setMethod('stochasticInflation',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, extraYears, plot)
      {
          ## A stochastic inflation series must have been supplied at model-building time.
          if(identical(object@input@stochInflationRate,0))
          {
              warning('Cannot call "stochasticInflation" unless there is a stochastic rate of inflation. Returning "NULL" invisibly.')
              return(invisible(NULL))
          }
          #don't have to check extraYears because the generic does that for us
          #the median holds up under log and exp so we don't have to calculate this draw by draw
          ## Transform the (log) predicted inflation back to a rate: exp(x) - 1.
          simulated.inflation.rate <- exp(object@stoch.log.inf.pred@median) - 1
          ## 90 percent credible interval, computed draw by draw from the coda.
          simulated.inflation.rate.ci <- apply(exp(slot(object@stoch.log.inf.pred, 'value')) - 1, 1, quantile, c(0.05, 0.95))
          total.simulated.years <- length(simulated.inflation.rate)
          ## Align the simulated values with calendar years, starting at the first
          ## year of the supplied inflation series.
          simulated.years <- min(object@input@stochInflationYears) - 1 + 1:total.simulated.years
          names(simulated.inflation.rate) <- simulated.years
          observed.years <- object@input@stochInflationYears
          total.observed.years <- length(observed.years)
          observed.inflation.rate <- object@input@stochInflationRate
          ## Number of forecast years to show, capped by both the user's request
          ## and the number of simulated years actually available.
          if(total.observed.years >= total.simulated.years)
              extra.years <- 0
          else
              extra.years <- min(extraYears,
                                 total.simulated.years - total.observed.years)
          ## If the stationary mean was fixed by the user, the helper returns NULL
          ## (with a warning we suppress) and the known value is used instead;
          ## otherwise take the posterior median ('50%').
          suppressWarnings( stat.mean <- stochasticInflationStationaryMean(object, plotDensity=FALSE, plotTrace=FALSE))
          if(is.null(stat.mean))
          {
              stat.mean <- object@input@knownStochInflationMean
          } else {
              stat.mean <- stat.mean['50%']
          }
          ## Main panel: actual rates (gray), predicted rates (black), forecast and
          ## credible interval beyond the observed years, and the stationary mean.
          f.plot <- function()
          {
              plot(x=range(observed.years) + c(0, extra.years),
                   y=range(observed.inflation.rate, simulated.inflation.rate[1:(total.observed.years + extra.years)], stat.mean),
                   xlab="Calendar Year",
                   ylab="Rate of Inflation (Actual and Predicted)",
                   type='n',
                   cex.axis=1.25,
                   cex.lab=1.25)
              ## Horizontal reference line at the stationary mean.
              abline(h=stat.mean,
                     lwd=2,
                     col='gray',
                     lty=3)
              lines(
                    x=observed.years,
                    y=observed.inflation.rate,
                    lwd=3,
                    col='gray')
              lines(
                    x=observed.years,
                    y=simulated.inflation.rate[1:total.observed.years],
                    lwd=2,
                    col='black')
              if(extra.years > 0 )
              {
                  ## Forecast beyond the last observed year.
                  lines(
                        x=max(observed.years) + 1:extra.years,
                        y=simulated.inflation.rate[total.observed.years + 1:extra.years],
                        lwd=2,
                        col='black',
                        lty=1)
                  ## Dashed 90 percent credible bounds on the forecast.
                  for(ind in c('5%', '95%'))
                  {
                      lines(
                            x=max(observed.years) + 1:extra.years,
                            y=simulated.inflation.rate.ci[ind, total.observed.years + 1:extra.years],
                            lwd=2,
                            col='gray',
                            lty=2)
                  }
              }
          }
          ## Legend panel; its contents depend on whether a forecast is shown.
          f.legend <- function()
          {
              if(extra.years == 0)
                  legend('center',
                         c('Actual','Predicted', 'Stationary\nMean'),
                         col = c('gray','black', 'gray'),
                         lwd=c(3,2,2),
                         lty=c(1,1,3),
                         horiz=TRUE,
                         xpd=NA,
                         bty='n')
              else
                  legend('center',
                         c('Actual', 'Predicted/\nForecast', '90 Percent\nCredible Interval', 'Stationary\nMean'),
                         col = c('gray', 'black', 'gray', 'gray'),
                         lwd=c(3, 2, 2, 2),
                         lty=c(1, 1, 2, 3),
                         horiz=TRUE,
                         xpd=NA,
                         bty='n')
          }
          if(plot)
              plot.top.bottom(f.plot, f.legend)
          return(invisible(simulated.inflation.rate))
      })
##' A generic function to plot and/or return predicted and forecast calendar year effect errors for models in \pkg{BALD}.
##'
##' The calendar year effect is comprised of two components: 1) a prior expected value which may be unique to every cell (subject to weights and bounds) and 2) a diagonal-specific error term.
##' This function only plots and returns the error term, which includes an autoregressive component if the model is estimated with such a feature.
##' See \code{vignette('BALD')}.
##'
##' @name calendarYearEffectErrors
##' @param object The object from which to plot and/or return the calendar year effect errors.
##' @param extraYears An integer expressing the (maximum) number of years to plot (beyond the final observed calendar year). Must be greater than or equal to zero. Default is 15.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=calendarYearEffectErrors,AnnualAggLossDevModelOutput-method]{calendarYearEffectErrors("AnnualAggLossDevModelOutput")}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{autoregressiveParameter}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffectErrorTracePlot}}
##' @exportMethod calendarYearEffectErrors
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' calendarYearEffectErrors(standard.model.output)
##' }
## Generic for 'calendarYearEffectErrors'.  Argument checks that apply to
## every method are performed here, before dispatch.
setGenericVerif('calendarYearEffectErrors',
                function(object, extraYears=15, plot=TRUE)
            {
                ## 'extraYears' must be a single non-negative number.
                if(!is.numeric(extraYears))
                    stop('"extraYears" must be numeric')
                if(length(extraYears) != 1L)
                    stop('"extraYears" must be of length 1')
                if(extraYears < 0)
                    stop('"extraYears" must be at least zero.')
                standardGeneric('calendarYearEffectErrors')
            })
##' A method to plot and/or return predicted and forecast calendar year effect errors for models in \pkg{BALD}.
##'
##' @name calendarYearEffectErrors,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOuput} from which to plot and/or return the calendar year effect errors.
##' @param extraYears An integer expressing the (maximum) number of years to plot (beyond the final observed calendar year). Must be greater than or equal to zero.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with the median predicted errors (not on the log scale). Returned invisibly.
##' @docType methods
##' @seealso \code{\link{calendarYearEffectErrors}}
##' \code{\link{calendarYearEffect}}
##' \code{\link{autoregressiveParameter}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffectErrorTracePlot}}
setMethod('calendarYearEffectErrors',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, extraYears, plot)
      {
          ##don't have to check extraYears because the generic does that for us
          ##the median holds up under log and exp so we don't have to calculate this draw by draw
          ## The first stored value is for a diagonal prior to the triangle and the
          ## second is for the first diagonal in the triangle (which has no
          ## identifiable effect); drop both.  Transform back from the log scale.
          kappa.error <- exp(object@kappa.log.error@median[-(1:2)]) - 1
          total.years <- length(kappa.error)
          ## Errors start one calendar year after the earliest exposure year
          ## (simplified from the original 'min(...) + 1 - 1 + 1:n'; seq_len is
          ## also safe when total.years is zero).
          years <- min(object@input@exposureYears) + seq_len(total.years)
          names(kappa.error) <- years
          observed.years <- object@input@exposureYears[-1]
          total.observed.years <- length(observed.years)
          ## Number of forecast years to show, capped by both the user's request
          ## and the number of estimated years actually available.
          if(total.observed.years >= total.years)
              extra.years <- 0
          else
              extra.years <- min(extraYears,
                                 total.years - total.observed.years)
          ## Main panel: estimated errors (black) and, if requested, predicted
          ## errors beyond the observed years (gray).
          f.plot <- function()
          {
              plot(x=range(observed.years) + c(0, extra.years),
                   y=range(kappa.error[1:(total.observed.years + extra.years)]),
                   xlab="Calendar Year",
                   ylab="Calendar Effect Error",
                   type='n',
                   cex.axis=1.25,
                   cex.lab=1.25)
              ## Zero reference line: no calendar year effect error.
              abline(h=0,
                     lty=2)
              lines(
                    x=observed.years,
                    y=kappa.error[1:total.observed.years],
                    lwd=2,
                    col='black')
              if(extra.years > 0 )
                  lines(
                        x=max(observed.years) + seq_len(extra.years),
                        y=kappa.error[total.observed.years + seq_len(extra.years)],
                        lwd=2,
                        col='gray')
          }
          ## Legend panel; its contents depend on whether a forecast is shown.
          f.legend <- function()
          {
              if(extra.years == 0)
                  legend('center',
                         c('Estimated'),
                         col = c('black'),
                         lwd=c(2),
                         horiz=TRUE,
                         xpd=NA,
                         bty='n')
              else
                  legend('center',
                         c('Estimated', 'Predicted'),
                         col = c('black', 'gray'),
                         lwd=c(2, 2),
                         horiz=TRUE,
                         xpd=NA,
                         bty='n')
          }
          if(plot)
              plot.top.bottom(f.plot, f.legend)
          ## Return the named vector of median errors (not on the log scale).
          return(invisible(kappa.error))
      })
##' A generic function to plot and/or return the predicted and forecast calendar year effects for models in \pkg{BALD}.
##'
##' The calendar year effect is comprised of two components: 1) a prior expected value that may be unique to every cell (subject to weights and bounds) and 2) a diagonal-specific error term.
##' This function plots and returns the factor resulting from the combined effect of these two, which includes an autoregressive component if the model is estimated with such a feature.
##'
##' The first cell is \code{NA}. Values in the first column represent the rate of inflation/escalation to the corresponding cell from the cell in the same column but previous row.
##' Values in the 2nd column and beyond represent the rate of inflation/escalation to the corresponding cell from the cell in the same row but previous column.
##' See \code{vignette('BALD')}.
##'
##' @name calendarYearEffect
##' @param object The object from which to plot and/or return the calendar year effect.
##' @param restrictedSize A logical value. If \code{TRUE}, the plotted calendar year effect is restricted to the square of dimension equal to the observed triangle with which the model was estimated.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=calendarYearEffect,AnnualAggLossDevModelOutput-method]{calendarYearEffect("AnnualAggLossDevModelOutput")}}
##' \code{\link{calendarYearEffectErrors}}
##' \code{\link{autoregressiveParameter}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffectErrorTracePlot}}
##' @exportMethod calendarYearEffect
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' calendarYearEffect(standard.model.output)
##' }
## Generic for 'calendarYearEffect'; see the methods for details.
setGenericVerif('calendarYearEffect',
                function(object, restrictedSize=FALSE, plot=TRUE)
                    standardGeneric('calendarYearEffect'))
##' A method to plot and/or return predicted and forecast calendar year effects for models in \pkg{BALD}.
##'
##' The first cell is \code{NA}. Values in the first column represent the rate of inflation/escalation to the corresponding cell from the cell in the same column but previous row.
##' Values in the 2nd column and beyond represent the rate of inflation/escalation to the corresponding cell from the cell in the same row but previous column.
##'
##' @name calendarYearEffect,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOuput} from which to plot and/or return the calendar year effect.
##' @param restrictedSize A logical value. If \code{TRUE}, the plotted calendar year effect is restricted to the square of dimension equal to the observed triangle with which the model was estimated.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array with the median predicted values (not on the log scale). Returned invisibly.
##' @docType methods
##' @seealso \code{\link{calendarYearEffect}}
##' \code{\link{calendarYearEffectErrors}}
##' \code{\link{autoregressiveParameter}}
##' \code{\link{standardDeviationOfCalendarYearEffect}}
##' \code{\link{calendarYearEffectErrorTracePlot}}
setMethod('calendarYearEffect',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, restrictedSize, plot)
      {
          ## Median posterior calendar year effect.  The first cell has no meaning,
          ## so blank it.  Computed once and shared by the plot and return value
          ## (the original duplicated this work in both branches).
          ans <- object@kappa@median
          ans[1,1] <- NA
          dimnames(ans)[[1]] <- min(object@input@exposureYears) - 1 + seq_len(dim(ans)[1])
          if(plot)
          {
              K <- getTriDim(object@input)[1]
              kappa <- ans
              ## Optionally restrict the display to the square whose dimension
              ## matches the observed triangle.
              if(restrictedSize)
                  kappa <- kappa[1:K, 1:K]
              all.exp.years <- min(object@input@exposureYears) - 1 + seq_len(dim(kappa)[1])
              data <- expand.grid(row=all.exp.years, column=seq_len(ncol(kappa)))
              data$z <- as.vector(as.numeric(kappa))
              ## Gray-scale level plot; ylim is reversed so exposure years run
              ## down the y-axis.
              print(
                    levelplot(z ~ column * row,
                              data,
                              aspect='iso',
                              ylim=(range(all.exp.years) + c(-0.5, 0.5))[c(2,1)],
                              col.regions=grey(seq(from=1, to=0, length.out=100)),
                              xlab='Development Year',
                              ylab=getExposureYearLabel(object@input)))
          }
          ## Always return the full-size (unrestricted) matrix invisibly.
          return(invisible(ans))
      })
##' A generic function to plot predicted vs actual payments for models from the \pkg{BALD} package.
##'
##' Because the model is Bayesian, each estimated payment comes as a distribution.
##' The median of this distribution is used as a point estimate when plotting and/or returning values.
##' Note: One cannot calculate the estimated incremental payments from the estimated cumulative payments (and vice versa) since the median of sums need not be equal to the sum of medians.
##'
##' If \code{mergePredictedWithObserved=TRUE} and \code{type="incremental"}, then any observed incremental payment will be used in place of its corresponding incremental payment.
##' If \code{mergePredictedWithObserved=TRUE} and \code{type="cumulative"}, then only predicted incremental payments (by row) to the right of the last observed cumulative value will enter the calculation.
##' See \code{vignette('BALD')}.
##'
##' @name predictedPayments
##' @param object The object from which to plot predicted vs actual payments and from which to return predicted payments.
##' @param type A single character value specifying whether to plot/return the predicted incremental or cumulative payments. Valid values are \dQuote{incremental} or \dQuote{cumulative.} See details as to why these may not match up.
##' @param logScale A logical value. If \code{TRUE}, then values are plotted on a log scale.
##' @param mergePredictedWithObserved A logical value. See details.
##' @param plotObservedValues A logical value. If \code{FALSE}, then only the predicted values are plotted.
##' @param plotPredictedOnlyWhereObserved A logical value. If \code{TRUE}, then only the predicted incremental payments with valid corresponding observed (log) incremental payment are plotted. Ignored for \code{type="cumulative"}.
##' @param quantiles A vector of quantiles for the predicted payments to return. Useful for constructing credible intervals.
##' @param plot A logical value. If \code{TRUE}, then the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=predictedPayments,AnnualAggLossDevModelOutput-method]{predictedPayments("AnnualAggLossDevModelOutput")}}
##' @exportMethod predictedPayments
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' predictedPayments(standard.model.output)
##' }
## Generic for 'predictedPayments'; all argument handling is done in the methods.
setGenericVerif('predictedPayments',
                function(object, type=c('incremental', 'cumulative'), logScale=TRUE, mergePredictedWithObserved=FALSE, plotObservedValues=logScale, plotPredictedOnlyWhereObserved=FALSE, quantiles=c(0.05, .5, 0.95), plot=TRUE)
            {
                standardGeneric('predictedPayments')
            })
##' A method to plot predicted vs actual payments for models from the \pkg{BALD} package.
##'
##' Because the model is Bayesian, each estimated payment comes as a distribution.
##' The median of this distribution is used as a point estimate when plotting and/or returning values.
##' Note: One cannot calculate the estimated incremental payments from the estimated cumulative payments (and vice versa) since the median of sums need not be equal to the sum of medians.
##'
##' If \code{mergePredictedWithObserved=TRUE} and \code{type="incremental"}, then any observed incremental payment will be used in place of its corresponding incremental payment.
##' If \code{mergePredictedWithObserved=TRUE} and \code{type="cumulative"}, then only predicted incremental payments (by row) to the right of the last observed cumulative value will enter the calculation.
##'
##' @name predictedPayments,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot predicted vs actual payments and return predicted payments.
##' @param type A single character value specifying whether to plot/return the predicted incremental or cumulative payments. Valid values are "incremental" or "cumulative." See details as to why these may not match up.
##' @param logScale A logical value. If \code{TRUE}, then values are plotted on a log scale.
##' @param mergePredictedWithObserved A logical value. If \code{TRUE}, then the returned values treat observed incremental payments at "face value"; otherwise predicted values are used in place of observed values.
##' @param plotObservedValues A logical value. If \code{FALSE}, then only the predicted values are plotted.
##' @param plotPredictedOnlyWhereObserved A logical value. See details.
##' @param quantiles A vector of quantiles for the predicted payments to return. Useful for constructing credible intervals.
##' @param plot A logical value. If \code{TRUE}, then the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a named array (with the same structure as the input triangle) containing the predicted log incremental payments. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{predictedPayments}}
setMethod('predictedPayments',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, type, logScale, mergePredictedWithObserved, plotObservedValues, plotPredictedOnlyWhereObserved, quantiles, plot)
      {
          ## Resolve 'type' against the generic's allowed choices
          ## ("incremental"/"cumulative"); match.arg permits partial matching.
          type <- match.arg(type)
          ## K: number of rows (= columns) of the observed square triangle.
          K <- getTriDim(object@input)[1]
          inc.obs <- object@input@incrementals
          cumul.obs <- object@input@cumulatives
          ## Posterior draws of predicted incremental payments.  Assumed
          ## layout: [exposure year, development year, iteration, chain]
          ## -- TODO confirm against the coda node definition.
          inc.pred.coda <- slot(object@inc.pred, 'value')
          inc.pred.median <- object@inc.pred@median
          if(type == 'cumulative')
          {
              ## Build cumulative draws by a running sum of the incrementals
              ## along the development-year dimension.
              cumul.pred.coda <- array(NA, dim(inc.pred.coda))
              cumul.pred.coda[,1,,] <- inc.pred.coda[,1,,]
              for(i in 2:dim(inc.pred.coda)[2])
                  cumul.pred.coda[,i,,] <- cumul.pred.coda[,i-1,,] + inc.pred.coda[,i,,]
              ## Pointwise posterior median of the cumulative payments.
              cumul.pred.median <- apply(cumul.pred.coda, c(1,2), median)
          }
          ##trim down the predicted values to the size of the original triangle
          inc.pred.median.trim <- inc.pred.median[1:K, 1:K]
          ##get rid of predicted values which do not correspond to observed values
          inc.pred.median.trim.only.where.obs <- inc.pred.median.trim
          inc.pred.median.trim.only.where.obs[inc.obs <= 0 | is.na(inc.obs)] <- NA
          if(type == 'incremental')
          {
              ans <- inc.pred.coda
              ## Label rows with calendar exposure years.
              dimnames(ans)[[1]] <- min(object@input@exposureYears) - 1 + 1:dim(ans)[1]
              if(mergePredictedWithObserved)
                  ## Overwrite predicted draws with observed incrementals
                  ## wherever an observation exists.  The K-by-K logical mask
                  ## recycles over the iteration and chain dimensions of the
                  ## 4-d sub-array, so every draw of an observed cell is
                  ## replaced by the single observed value.
                  ans[1:K, 1:K, , ][!is.na(inc.obs)] <- inc.obs[!is.na(inc.obs)]
              ## Collapse the draws to the requested quantiles per cell.
              ans <- apply(ans, c(1, 2), quantile, quantiles)
          } else {
              if(!mergePredictedWithObserved)
              {
                  ans <- cumul.pred.coda
                  dimnames(ans)[[1]] <- min(object@input@exposureYears) - 1 + 1:dim(ans)[1]
                  ans <- apply(ans, c(1, 2), quantile, quantiles)
              } else {
                  ## Cumulative values "at face value": seed each row with the
                  ## observed cumulatives, then extend it with predicted
                  ## incrementals to the right of the last observed diagonal.
                  tmp <- array(NA,
                               dim(inc.pred.coda),
                               dimnames=list(min(object@input@exposureYears) - 1 + 1:dim(inc.pred.coda)[1], NULL))
                  tmp[1:K, 1:K, , ] <- cumul.obs
                  for(i in 1:K)
                  {
                      ## j.lower: last development year with an observed
                      ## cumulative in row i (falls back to 1 for empty rows).
                      j.lower <- which(!is.na(cumul.obs[i,]))
                      if(length(j.lower) == 0)
                      {
                          j.lower <- 1
                      } else {
                          j.lower <- max(j.lower)
                      }
                      for(j in (j.lower+1):(dim(inc.pred.coda)[2]))
                          tmp[i,j,,] <- tmp[i,j-1,,] + inc.pred.coda[i,j,,]
                  }
                  ## Rows beyond the observed triangle are purely predicted.
                  ## NOTE(review): this indexing assumes at least one forecast
                  ## exposure year (dim > K); confirm the model guarantees it.
                  tmp[(K+1):(dim(cumul.pred.coda)[1]), , , ] <- cumul.pred.coda[(K+1):(dim(cumul.pred.coda)[1]), , , ]
                  ans <- apply(tmp, c(1,2), quantile, probs = quantiles, na.rm = TRUE)
              }
          }
          ## Chart body; drawn via plot.top.bottom below when plot=TRUE.
          plot.f <- function()
          {
              if(type == 'incremental')
              {
                  ## Choose which predicted medians to chart.
                  if(plotPredictedOnlyWhereObserved)
                  {
                      inc.pred.for.plot <- inc.pred.median.trim.only.where.obs
                  } else {
                      inc.pred.for.plot <- inc.pred.median
                  }
                  if(logScale)
                  {
                      ## Non-positive values cannot be drawn on a log axis.
                      inc.pred.for.plot[inc.pred.for.plot <= 0] <- NA
                      inc.obs.for.plot <- inc.obs
                      inc.obs.for.plot[inc.obs.for.plot <= 0] <- NA
                  } else {
                      inc.obs.for.plot <- inc.obs
                  }
                  x.range <- c(1, dim(inc.pred.for.plot)[2])
                  if(plotObservedValues)
                      y.range <- range(inc.pred.for.plot, inc.obs.for.plot, na.rm=TRUE)
                  else
                      y.range <- range(inc.pred.for.plot, na.rm=TRUE)
                  ## Empty frame; series are added row by row below.
                  plot(x=x.range,
                       y=y.range,
                       ylab='Incremental Payments',
                       xlab='Development Year',
                       type='n',
                       log=ifelse(logScale, 'y', ''),
                       cex.axis=1.25,
                       cex.lab=1.25)
                  ## One line per exposure year, colored consistently.
                  for(i in 1:dim(inc.pred.for.plot)[1])
                  {
                      tmp <- inc.pred.for.plot[i,]
                      lines(x=1:length(tmp),
                            y=tmp,
                            col=get.color(i),
                            type=ifelse(plotPredictedOnlyWhereObserved, 'o', 'l'),
                            pch=20)
                  }
                  if(plotObservedValues)
                  {
                      ## Observed values as points over the predicted lines.
                      for(i in 1:dim(inc.obs.for.plot)[1])
                      {
                          tmp <- inc.obs.for.plot[i,]
                          lines(x=1:length(tmp),
                                y=tmp,
                                type='p',
                                col=get.color(i))
                      }
                  }
              } else {
                  ## Cumulative variant; mirrors the incremental branch.
                  cumul.pred.for.plot <- cumul.pred.median
                  if(logScale)
                  {
                      cumul.pred.for.plot[cumul.pred.for.plot <= 0] <- NA
                      cumul.obs.for.plot <- cumul.obs
                      cumul.obs.for.plot[cumul.obs.for.plot <= 0] <- NA
                  } else {
                      cumul.obs.for.plot <- cumul.obs
                  }
                  x.range <- c(1, dim(cumul.pred.for.plot)[2])
                  if(plotObservedValues)
                      y.range <- range(cumul.pred.for.plot, cumul.obs.for.plot, na.rm=TRUE)
                  else
                      y.range <- range(cumul.pred.for.plot, na.rm=TRUE)
                  plot(x=x.range,
                       y=y.range,
                       ylab='Cumulative Payments',
                       xlab='Development Year',
                       type='n',
                       log=ifelse(logScale, 'y', ''),
                       cex.axis=1.25,
                       cex.lab=1.25)
                  for(i in 1:dim(cumul.pred.for.plot)[1])
                  {
                      tmp <- cumul.pred.for.plot[i,]
                      lines(x=1:length(tmp),
                            y=tmp,
                            col=get.color(i))
                  }
                  if(plotObservedValues)
                  {
                      for(i in 1:dim(cumul.obs.for.plot)[1])
                      {
                          tmp <- cumul.obs.for.plot[i,]
                          lines(x=1:length(tmp),
                                y=tmp,
                                type='p',
                                col=get.color(i))
                      }
                  }
              }
          }
          ## Legend drawn in the lower panel; only needed when observed
          ## values share the chart with predictions.
          legend.f <- function()
          {
              if(plotObservedValues)
              {
                  legend('center',
                         legend=c('Predicted','Observed'),
                         col='black',
                         lwd=2,
                         lty=c(1, NA),
                         pch=c(ifelse(type=='incremental' && plotPredictedOnlyWhereObserved, 20, NA), 1),
                         horiz=TRUE,
                         bty='n',
                         xpd=NA)
              }
          }
          if(plot)
              plot.top.bottom(plot.f, legend.f)
          ## Return the per-cell quantile summary invisibly.
          return(invisible(ans))
      })
##' A generic function to plot and/or return the posterior estimated standard deviation by development year.
##'
##' Aggregate loss development models in \pkg{BALD} allow for changes (by development year) in the measurement error around the log incremental payments.
##' This is a generic function that allows for the retrieval and illustration of this standard deviation.
##' See \code{vignette('BALD')}.
##'
##' @name standardDeviationVsDevelopmentTime
##' @param object The object from which to plot and/or return the estimated standard deviation by development year.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=standardDeviationVsDevelopmentTime,AnnualAggLossDevModelOutput-method]{standardDeviationVsDevelopmentTime("AnnualAggLossDevModelOutput")}}
##' @exportMethod standardDeviationVsDevelopmentTime
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' standardDeviationVsDevelopmentTime(standard.model.output)
##' }
setGenericVerif('standardDeviationVsDevelopmentTime',
                function(object, plot=TRUE)
                ## Dispatches on 'object'; see the AnnualAggLossDevModelOutput method.
                standardGeneric('standardDeviationVsDevelopmentTime'))
##' A method to plot and/or return the posterior estimated standard deviation by development year.
##'
##' Aggregate loss development models in \pkg{BALD} allow for changes (by development year) in the measurement error around the log incremental payments.
##' This is a method that allows for the retrieval and illustration of this standard deviation.
##'
##' @name standardDeviationVsDevelopmentTime,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the estimated standard deviation by development year.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a numeric vector of the plotted statistics. Returned invisibly.
##' @docType methods
##' @seealso \code{\link{standardDeviationVsDevelopmentTime}}
setMethod('standardDeviationVsDevelopmentTime',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, plot)
      {
          ## K: number of development years in the observed triangle.
          K <- getTriDim(object@input)[1]
          ## Posterior draws pulled from the coda nodes:
          ## beta = skewness parameter, v = degrees of freedom,
          ## h = scale of the measurement error by development year.
          ## (Assumed layouts follow the package's coda conventions --
          ## TODO confirm against the node definitions.)
          beta <- slot(object@beta, 'value')[1,,]
          v <- slot(object@df, 'value')[1,,]
          h <- slot(object@h, 'value')
          ## st.d collects the 95th/50th/5th posterior percentiles of the
          ## standard deviation for each of the K development years.
          st.d <- array(NA, c(3, K), list(c('95%', '50%', '5%'), NULL))
          for(i in 1:K)
          {
              ## Per-draw standard deviation of the measurement error.
              ## NOTE(review): this appears to be the analytic s.d. of a
              ## skewed-t with scale h, skew beta and d.o.f. v (finite only
              ## for v > 4) -- confirm against the model specification.
              tmp <- quantile(
                             sqrt(h[i,,] ^ 2 * v / (v - 2) + 2 * beta ^ 2 * h[i,,] ^ 4 * v ^ 2 / (v - 2) ^ 2 / (v - 4)),
                             c(.95, .5, .05))
              st.d[names(tmp),i] <- tmp
          }
          ## Median in black flanked by a grey 90 percent credible band.
          f.plot <- function()
          {
              matplot(y=t(st.d),
                      x=1:K,
                      col=c('grey', 'black', 'grey'),
                      lwd=c(3,2,3),
                      lty=c(2,1,2),
                      xlab='Development Year',
                      ylab="Standard Deviation in Measurement Equation",
                      cex.axis=1.25,
                      cex.lab=1.25,
                      type='l')
          }
          f.legend <- function()
          {
              legend('center',
                     legend=c('Median','90 Percent Credible Interval'),
                     col=c('black', 'grey'),
                     lwd=c(2,3),
                     lty=c(1,2),
                     horiz=TRUE,
                     bty='n',
                     xpd=NA)
          }
          if(plot)
              plot.top.bottom(f.plot, f.legend)
          ## Return the plotted percentiles invisibly.
          return(invisible(st.d))
      })
##' A generic function to generate the trace plots for select exposure growth rates.
##' See \code{vignette('BALD')}.
##'
##' @name exposureGrowthTracePlot
##' @param object The object from which to generate the trace plots.
##' @param elements A numeric vector indicating the elements for which to plot the trace. Valid values are 2 through the total number of exposure years. If NULL, values are selected automatically.
##' @return NULL invisibly. Only called for the side effect of plotting.
##' @seealso \code{\link[=exposureGrowthTracePlot,AnnualAggLossDevModelOutput-method]{exposureGrowthTracePlot("AnnualAggLossDevModelOutput")}}
##' \code{\link{exposureGrowth}}
##' @exportMethod exposureGrowthTracePlot
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' exposureGrowthTracePlot(standard.model.output)
##' }
setGenericVerif('exposureGrowthTracePlot',
                function(object, elements=NULL)
                ## Dispatches on 'object'; elements=NULL selects defaults in the method.
                standardGeneric('exposureGrowthTracePlot'))
##' A method to generate the trace plots for select exposure growth rates.
##'
##'
##' @name exposureGrowthTracePlot,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to generate the trace plots.
##' @param elements A numeric vector indicating the elements for which to plot the trace. If NULL, values are selected automatically.
##' @return NULL invisibly. Only called for the side effect of plotting.
##' @docType methods
##' @seealso \code{\link{exposureGrowthTracePlot}}
##' \code{\link{exposureGrowth}}
setMethod('exposureGrowthTracePlot',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, elements)
      {
          ## Trace plots for selected exposure-growth (eta) elements.
          ## Valid user-supplied indices run from 2 through the total
          ## number of exposure years.
          smallest.valid <- 2
          largest.valid <- object@input@totalExpYears
          if(is.null(elements))
          {
              ## Default selection: first, middle, and last valid elements.
              elements <- c(smallest.valid,
                            floor((smallest.valid + largest.valid) / 2),
                            largest.valid)
          } else {
              if(!is.numeric(elements) || !is.vector(elements))
                  stop('"elements" must either be "NULL" or a numeric vector')
              if(any(elements != as.integer(elements)))
                  stop('"elements" must be coercible to integers')
              if(any(elements > largest.valid) || any(elements < smallest.valid))
                  stop(paste('"elements" must be at most the total number of exposure years (', largest.valid,') and at least 2', sep=''))
          }
          ## Pull the selected rows of posterior draws and plot their traces.
          eta.draws <- slot(object@eta, 'value')[elements,,]
          trace.titles <- paste('Exposure Growth :', elements, sep='')
          plot.trace.plots(eta.draws, trace.titles)
      })
##' A generic function to generate the trace plots for select calendar year effect errors.
##'
##' The calendar year effect is comprised of two components: 1) a prior expected value that may be unique to every cell and 2) a diagonal-specific error term.
##' This function generates trace plots for the diagonal specific error terms only.
##' See \code{vignette('BALD')}.
##'
##' @name calendarYearEffectErrorTracePlot
##' @param object The object from which to generate the trace plots.
##' @param elements A numeric vector indicating the elements for which to plot the trace. Valid values are 2 through the total number of exposure years (observed and forecast). If NULL, values are selected automatically.
##' @return NULL invisibly. Only called for the side effect of plotting.
##' @seealso \code{\link[=calendarYearEffectErrorTracePlot,AnnualAggLossDevModelOutput-method]{calendarYearEffectErrorTracePlot("AnnualAggLossDevModelOutput")}}
##' \code{\link{calendarYearEffectErrors}}
##' @exportMethod calendarYearEffectErrorTracePlot
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' calendarYearEffectErrorTracePlot(standard.model.output)
##' }
setGenericVerif('calendarYearEffectErrorTracePlot',
                function(object, elements=NULL)
                ## Dispatches on 'object'; elements=NULL selects defaults in the method.
                standardGeneric('calendarYearEffectErrorTracePlot'))
##' A method to generate the trace plots for select calendar year effect errors.
##'
##' @name calendarYearEffectErrorTracePlot,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to generate the trace plots.
##' @param elements A numeric vector indicating the elements for which to plot the trace. Valid values are 2 through the total number of exposure years. If NULL, values are selected automatically.
##' @return NULL invisibly. Only called for the side effect of plotting.
##' @docType methods
##' @seealso \code{\link{calendarYearEffectErrorTracePlot}}
##' \code{\link{calendarYearEffectErrors}}
setMethod('calendarYearEffectErrorTracePlot',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, elements)
      {
          ## Trace plots for selected calendar year effect errors.
          ## Valid user-supplied indices run from 2 through the total number
          ## of exposure years (observed and forecast), matching the roxygen
          ## documentation and the check below.
          lb <- 2
          ub <- object@input@totalExpYears
          if(is.null(elements))
          {
              ## Default selection: first, middle, and last valid elements.
              elements <- c(lb, floor((lb + ub) / 2), ub)
          } else {
              if(!is.numeric(elements) || !is.vector(elements))
                  stop('"elements" must either be "NULL" or a numeric vector')
              if(any(elements != as.integer(elements)))
                  stop('"elements" must be coercible to integers')
              if(any(elements > ub) || any(elements < lb))
                  ## Message fixed to agree with the enforced lower bound (2);
                  ## it previously said "at least 3".
                  stop(paste('"elements" must be at most the total number of exposure years (', ub,') and at least 2', sep=''))
          }
          elements <- elements + 1 #in the model file, the first identifiable calendar year effect is numbered 3
          ## Errors are plotted on the multiplicative scale: exp(log error) - 1.
          ## NOTE(review): the trace titles use the shifted (internal) index,
          ## matching the model-file numbering rather than the user's input.
          plot.trace.plots(exp(slot(object@kappa.log.error, 'value')[elements,,]) - 1, paste('Calendar Year Effect Error :', elements, sep=''))
          ## Documented contract: NULL, invisibly.
          return(invisible(NULL))
      })
##' A generic function to plot and/or return a table of predicted age-to-age loss development factors (or link ratios).
##'
##' While the model estimates ultimate losses directly, comparisons of predicted to observed development factors can give the user a better feel for the model's adequacy.
##' Since the model is Bayesian, each development factor comes as a distribution. Only the median, as a point estimate, is plotted/returned.
##'
##' The age-to-age factors are the ratios of the cumulative paid values at one period to the previous period.
##' Note that the median of products is not the product of medians, and thus it is possible (or rather likely) that age-to-age factors will not line up with age-to-ultimate factors (see \code{\link{tailFactor}}).
##' See \code{vignette('BALD')}.
##'
##' @name lossDevelopmentFactors
##' @param object The object from which to plot and/or return loss development factors.
##' @param cex.text The \code{cex} value supplied to \code{text}. Adjusts the relative size of text.
##' @param linespace Adjusts the spacing between observed and predicted values.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns a numeric matrix of plotted statistics.
##' @seealso \code{\link[=lossDevelopmentFactors,AnnualAggLossDevModelOutput-method]{lossDevelopmentFactors("AnnualAggLossDevModelOutput")}}
##' \code{\link{tailFactor}}
##' @exportMethod lossDevelopmentFactors
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' lossDevelopmentFactors(standard.model.output)
##' }
setGenericVerif('lossDevelopmentFactors',
                function(object, cex.text=.77, linespace=0.5, plot=TRUE)
            {
                ## Validate the scalar numeric plotting arguments before
                ## dispatching to the method.
                is.scalar.numeric <- function(v)
                    is.numeric(v) && length(v) == 1
                if(!is.scalar.numeric(cex.text))
                    stop('"cex.text" must be numeric of length 1')
                if(!is.scalar.numeric(linespace))
                    stop('"linespace" must be numeric of length 1')
                standardGeneric('lossDevelopmentFactors')
            })
##' A method to plot and/or return a table of predicted age-to-age loss development factors (or link ratios).
##'
##' @name lossDevelopmentFactors,AnnualAggLossDevModelOutput-method
##' @param object The object of type \code{AnnualAggLossDevModelOutput} from which to plot and/or return the factors.
##' @param cex.text The \code{cex} value supplied to \code{text}. Adjusts the relative size of text.
##' @param linespace Adjusts the spacing between observed and predicted values.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting, but also returns a numeric matrix of plotted statistics.
##' @docType methods
##' @seealso \code{\link{lossDevelopmentFactors}}
##' \code{\link{tailFactor}}
setMethod('lossDevelopmentFactors',
          signature(object='AnnualAggLossDevModelOutput'),
          function(object, cex.text, linespace, plot)
      {
          ## Plot (optionally) a table of predicted vs observed age-to-age
          ## loss development factors and return the predicted factors.
          ##
          ## object:    the model output to summarize.
          ## cex.text:  relative size of the text drawn in each cell.
          ## linespace: vertical offset between predicted and observed values.
          ## plot:      if TRUE, draw the table.  (Previously this argument
          ##            was silently ignored and nothing was returned,
          ##            contrary to the documented contract.)
          ##
          ## Returns (invisibly) a numeric matrix of predicted age-to-age
          ## factors: rows are exposure years, columns development links.
          col.grey <- grey(0.45)
          K <- getTriDim(object@input)[1]
          obs.exp.years <- object@input@exposureYears
          ## seq_len() (rather than 1:n) correctly yields integer(0) when
          ## there are no forecast exposure years, where 1:0 would give c(1, 0).
          pred.exp.years <- max(obs.exp.years) + seq_len(object@input@totalExpYears - K)
          all.exp.years <- c(obs.exp.years, pred.exp.years)
          ey.type <- object@input@triangleType
          ## Observed factors: ratio of successive observed cumulatives,
          ## padded with NA rows for the forecast exposure years.
          obs.ldf <- object@input@cumulatives[,-1] / object@input@cumulatives[,-K]
          obs.ldf <- rbind(obs.ldf, array(NA, c(length(pred.exp.years), dim(obs.ldf)[2])))
          ## Predicted factors: per-cell posterior median of the ratio of
          ## successive predicted cumulative payments.
          ldf.pred <- array(NA, dim(obs.ldf))
          inc.pred <- slot(object@inc.pred, 'value')
          current.loss <- inc.pred[, 1, , ]
          for(j in seq_len(dim(ldf.pred)[2]))
          {
              next.loss <- current.loss + inc.pred[, j + 1, , ]
              ldf.pred[,j] <- apply(next.loss / current.loss, 1, median, na.rm = TRUE)
              current.loss <- next.loss
          }
          ## Label rows with their exposure years for the returned matrix.
          dimnames(ldf.pred) <- list(all.exp.years, NULL)
          if(plot)
          {
              ## Policy-year triangles start with a "half report"; the chart
              ## subtitle reminds the reader what the first column links.
              if(ey.type=='py')
              {
                  sub <- 'First Column is Link From Half to First'
              } else {
                  sub <- 'First Column is Link From First to Second'
              }
              ## Empty frame with exposure years descending down the y axis.
              plot(x=range(1:(K-1)) + c(-0.5, 0.5),
                   y=(range(obs.exp.years, pred.exp.years) + c(-0.5, 0.5))[c(2,1)],
                   ylim=(range(obs.exp.years, pred.exp.years) + c(-0.5, 0.5))[c(2,1)],
                   ylab=getExposureYearLabel(object@input),
                   xlab='Development Year',
                   cex.axis=1.25,
                   cex.lab=1.25,
                   type="n",
                   font.main=1,
                   cex.main=1.5,
                   sub=sub)
              ## One cell per (exposure year, development link): predicted in
              ## black with the observed value (or '-') in grey just below.
              for (i in seq_along(all.exp.years))
              {
                  for (j in seq_len(K-1))
                  {
                      text(x=j,
                           y=all.exp.years[i],
                           as.character(format(ldf.pred[i,j], digits=4, nsmall=3)),
                           font=1,
                           cex=cex.text,
                           col='black')
                      text(x=j,
                           y=all.exp.years[i]+linespace,
                           ifelse(is.na(obs.ldf[i,j]), '-', as.character(format(obs.ldf[i,j], digits=4, nsmall=3))),
                           font=1,
                           cex=cex.text,
                           col=col.grey)
                  }
              }
          }
          ## Return the matrix of predicted factors per the documented contract.
          return(invisible(ldf.pred))
      })
##' A generic function to plot the trace plots for select rate of decay values.
##' See \code{vignette('BALD')}.
##'
##' @name rateOfDecayTracePlot
##' @param object The object from which to generate the trace plots.
##' @param elements A numeric vector indicating for which elements to plot the trace. Valid values are 2 through the number of columns in the observed triangle. If NULL, values are selected automatically.
##' @param \dots Additional arguments used by methods.
##' @return NULL invisibly. Only called for the side effect of plotting.
##' @seealso \code{\link[=rateOfDecayTracePlot,StandardAnnualAggLossDevModelOutput-method]{rateOfDecayTracePlot("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link{rateOfDecay}}
##' @exportMethod rateOfDecayTracePlot
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' rateOfDecayTracePlot(standard.model.output)
##' }
setGenericVerif('rateOfDecayTracePlot',
                function(object, elements=NULL, ...)
                ## Dispatches on 'object'; '...' forwards method-specific arguments.
                standardGeneric('rateOfDecayTracePlot'))
##' A generic function to generate the trace plots for select consumption path values.
##' See \code{vignette('BALD')}.
##'
##' @name consumptionPathTracePlot
##' @param object The object from which to generate the trace plots.
##' @param elements A numeric vector indicating the elements for which to plot the trace. Valid values are 1 through the number of development years (columns) in the observed triangle. If NULL, values are selected automatically.
##' @param \dots Additional arguments used by methods.
##' @return NULL invisibly. Only called for the side effect of plotting.
##' @seealso \code{\link[=consumptionPathTracePlot,StandardAnnualAggLossDevModelOutput-method]{consumptionPathTracePlot("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link{consumptionPath}}
##' @exportMethod consumptionPathTracePlot
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' consumptionPathTracePlot(standard.model.output)
##' }
setGenericVerif('consumptionPathTracePlot',
                function(object, elements=NULL, ...)
                ## Dispatches on 'object'; '...' forwards method-specific arguments.
                standardGeneric('consumptionPathTracePlot'))
##' A generic function to plot and/or return the estimated consumption path vs development year time.
##'
##' At the heart of aggregate loss development models in \pkg{BALD} is the consumption path.
##' The consumption path is (on a log scale) the trajectory of incremental payments absent calendar year effects and with exposure normalized to the first row.
##' Note that the measurement error term is (possibly) a skewed \eqn{t} and as such (possibly) has a non zero mean. The consumption path is absent any such shifts due to skewness.
##' This is a generic function that allows for the retrieval and illustration of this consumption path.
##' See \code{vignette('BALD')}.
##'
##' @name consumptionPath
##' @param object The object from which to plot and/or return the estimated consumption path.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns the plotted statistics. Returned invisibly.
##' @seealso \code{\link[=consumptionPath,StandardAnnualAggLossDevModelOutput-method]{consumptionPath("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link[=consumptionPath,BreakAnnualAggLossDevModelOutput-method]{consumptionPath("BreakAnnualAggLossDevModelOutput")}}
##' \code{\link{consumptionPathTracePlot}}
##' @exportMethod consumptionPath
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' consumptionPath(standard.model.output)
##' }
setGenericVerif('consumptionPath',
                function(object, plot=TRUE)
                ## Dispatches on 'object'; 'plot' toggles charting vs. statistics only.
                standardGeneric('consumptionPath'))
##' A generic function to plot and/or return the estimated rate of decay vs development year time.
##'
##' The simplest definition of the rate of decay is the exponentiated first difference of the \link[=consumptionPath]{consumption path}.
##' This is a generic function to allow for the retrieval and illustration of the rate of decay.
##' See \code{vignette('BALD')}.
##'
##' @name rateOfDecay
##' @param object The object from which to plot and/or return the estimated rate of decay.
##' @param plot A logical value. If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Mainly called for the side effect of plotting. Also returns the plotted statistics. Returned invisibly.
##' @seealso \code{\link[=rateOfDecay,StandardAnnualAggLossDevModelOutput-method]{rateOfDecay("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link[=rateOfDecay,BreakAnnualAggLossDevModelOutput-method]{rateOfDecay("BreakAnnualAggLossDevModelOutput")}}
##' \code{\link{consumptionPath}}
##' \code{\link{rateOfDecayTracePlot}}
##' @exportMethod rateOfDecay
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' rateOfDecay(standard.model.output)
##' }
setGenericVerif('rateOfDecay',
                function(object, plot=TRUE)
                ## Dispatches on 'object'; 'plot' toggles charting vs. statistics only.
                standardGeneric('rateOfDecay'))
##' A generic function to plot and/or return the predicted tail factors for a specific attachment point.
##'
##' The tail factor is the ratio of the estimated ultimate loss to cumulative loss at some point in development time.
##' This is a generic function to allow for the retrieval and illustration the tail factor by exposure year.
##'
##' \bold{Note on \code{firstIsHalfReport} and \code{attachment}:} \code{firstIsHalfReport} refers to the first column of the triangle.
##' For policy year triangles, the first column is often referred to as a \dQuote{half-report}, the second column is called \dQuote{first-report}, the third column is called \dQuote{second-report}, etc.
##' If \code{firstIsHalfReport=TRUE}, then \code{tailFactor} will assume the triangle is arranged in such a way that the first column is the \dQuote{half-report}
##' and \code{attachment=1} indicates that the charted tail factor attaches at the cumulative loss through the second column.  If \code{firstIsHalfReport=FALSE},
##' then \code{attachment=1} indicates that the charted tail factor attaches at the cumulative loss through the first column.  Since \code{attachment} must be coercible to an integer,
##' it is impossible to plot half-to-ultimate tail factors; however, they are the first column in the returned matrix.
##'
##' \code{firstIsHalfReport} can be \code{NA} (the default)
##' if the exposure year type was specified to be one of \dQuote{policy year} or \dQuote{accident year} at the time the input object was constructed (see \code{\link{makeStandardAnnualInput}}
##' or \code{\link{makeBreakAnnualInput}}).  An exposure year type of \dQuote{policy year} corresponds to \code{firstIsHalfReport=TRUE},
##' and an exposure year type of \dQuote{accident year} corresponds to \code{firstIsHalfReport=FALSE}.  Setting \code{firstIsHalfReport} to a non-missing value will override this default.
##'
##' If \code{expYearRange} is \dQuote{fullyObs}, then only exposure years with a non missing value in the first column will be plotted.
##' See \code{vignette('BALD')}.
##'
##' @name tailFactor
##' @param object The object from which to plot the predicted tail factors and return tail factors for \emph{all} attachment points.
##' @param attachment An integer value specifying the attachment point for the tail.  Must be at least 1.  See Details for more information.
##' @param useObservedValues A logical value.  If \code{TRUE}, observed values are substituted for predicted values whenever possible in the calculation.  If \code{FALSE}, only predicted values are used.
##' @param firstIsHalfReport A logical value or \code{NA}.  See Details for more info.
##' @param finalAttachment An integer value must be at least 1.  Default value is \code{attachment}.  A call to \code{tailFactor} will return (invisibly) a matrix of tail factors through this value.
##' @param plot A logical value.  If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @param expYearRange Either a range of years (for example c(1995, 2006)) or one of the keywords \dQuote{all} or \dQuote{fullyObs}.
##' @return Mainly called for the side effect of plotting.
##' @seealso \code{\link[=tailFactor,StandardAnnualAggLossDevModelOutput-method]{tailFactor("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link[=tailFactor,BreakAnnualAggLossDevModelOutput-method]{tailFactor("BreakAnnualAggLossDevModelOutput")}}
##' @exportMethod tailFactor
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[,1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years=5,
##' use.skew.t=TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn=30.0E+3,
##' sampleSize=30.0E+3,
##' thin=10)
##' tailFactor(standard.model.output,10)
##' }
setGenericVerif('tailFactor',
                function(object, attachment, useObservedValues=FALSE, firstIsHalfReport=NA, finalAttachment=attachment, plot=TRUE, expYearRange='all')
            {
                ## Shared validation for "attachment" and "finalAttachment":
                ## each must be a single, non-missing, positive whole number.
                ## Factored into a local helper so the two arguments cannot
                ## drift apart; the error messages are identical to the
                ## originals ('"attachment" must be numeric', etc.).
                validate.attachment.point <- function(value, name)
                {
                    if(!is.numeric(value))
                        stop(sprintf('"%s" must be numeric', name))
                    if(!identical(length(value), 1L))
                        stop(sprintf('"%s" must be of length 1', name))
                    if(is.na(value))
                        stop(sprintf('"%s" cannot be NA', name))
                    if(as.integer(value) != value)
                        stop(sprintf('"%s" must be coercible to an integer', name))
                    if(value <= 0)
                        stop(sprintf('"%s" must be at least 1', name))
                }

                validate.attachment.point(attachment, 'attachment')
                validate.attachment.point(finalAttachment, 'finalAttachment')

                if(finalAttachment < attachment)
                    stop('"finalAttachment" must be at least equal to "attachment"')

                if(is.character(expYearRange))
                {
                    if(length(expYearRange) != 1)
                        stop('"expYearRange" must be of length one if it is a character')
                    if(expYearRange != 'all' && expYearRange != 'fullyObs')
                        stop('"expYearRange" must be one of "all" or "fullyObs" if it is supplied as a character')
                    ## Translating the keywords into an actual range of years is
                    ## deferred to the methods, which know the exposure years.
                } else {
                    ## Guard against non-numeric and NA values before the
                    ## whole-number comparison below; "as.integer(NA) == NA"
                    ## would otherwise yield NA and abort with an unhelpful
                    ## "missing value where TRUE/FALSE needed" error.
                    if(!is.numeric(expYearRange) || any(is.na(expYearRange)))
                        stop('"expYearRange" must be supplied as an integer')
                    if(!all(as.integer(expYearRange) == expYearRange))
                        stop('"expYearRange" must be supplied as an integer')
                    if(length(expYearRange) != 2)
                        stop('"expYearRange" must have length 2')
                    ## Checking the supplied range against the actual exposure
                    ## years is likewise deferred to the methods.
                }

                standardGeneric('tailFactor')
            })
##' A generic function to plot and/or return posterior statistics on the number of knots.
##'
##' The \link[=consumptionPath]{consumption path} -- the calendar year effect and exposure
##' growth adjusted log incremental payments -- is modeled as a linear spline.
##' How many knots (points at which the spline changes slope) this spline has is
##' endogenous to the model and is estimated by way of Reversible Jump Markov Chain
##' Monte Carlo simulation.
##' See \code{vignette('BALD')}.
##'
##' @name numberOfKnots
##' @param object The object from which to plot the number of knots.
##' @param plot A logical value.  If \code{TRUE}, the plot is generated and the statistics are returned; otherwise only the statistics are returned.
##' @return Statistics on the posterior number of knots, returned invisibly; mainly called for the side effect of plotting.
##' @seealso \code{\link{consumptionPath}}
##' \code{\link[=numberOfKnots,StandardAnnualAggLossDevModelOutput-method]{numberOfKnots("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link[=numberOfKnots,BreakAnnualAggLossDevModelOutput-method]{numberOfKnots("BreakAnnualAggLossDevModelOutput")}}
##' @exportMethod numberOfKnots
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[, 1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years = 5,
##' use.skew.t = TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn = 30.0E+3,
##' sampleSize = 30.0E+3,
##' thin = 10)
##' numberOfKnots(standard.model.output)
##' }
setGenericVerif('numberOfKnots',
                function(object, plot=TRUE) standardGeneric('numberOfKnots'))
##' A generic function to plot autocorrelations found in the \acronym{MCMC} samples for select parameters.
##'
##' Highly autocorrelated chains explore the parameter space slowly, so they
##' require a longer burnin period and a larger number of samples.
##' See \code{vignette('BALD')}.
##'
##' @name mcmcACF
##' @param object The object from which to plot autocorrelations.
##' @return Called purely for the side effect of plotting.
##' @seealso \code{\link[=mcmcACF,StandardAnnualAggLossDevModelOutput-method]{mcmcACF("StandardAnnualAggLossDevModelOutput")}}
##' \code{\link[=mcmcACF,BreakAnnualAggLossDevModelOutput-method]{mcmcACF("BreakAnnualAggLossDevModelOutput")}}
##' @exportMethod mcmcACF
##' @examples
##' rm(list=ls())
##' options(device.ask.default=FALSE)
##' library(BALD)
##' data(IncrementalGeneralLiablityTriangle)
##' IncrementalGeneralLiablityTriangle <- as.matrix(IncrementalGeneralLiablityTriangle)
##' print(IncrementalGeneralLiablityTriangle)
##' data(PCE)
##' PCE <- as.matrix(PCE)[, 1]
##' PCE.rate <- PCE[-1] / PCE[-length(PCE)] - 1
##' PCE.rate.length <- length(PCE.rate)
##' PCE.years <- as.integer(names(PCE.rate))
##' years.available <- PCE.years <= max(as.integer(
##' dimnames(IncrementalGeneralLiablityTriangle)[[1]]))
##' PCE.rate <- PCE.rate[years.available]
##' PCE.rate.length <- length(PCE.rate)
##' standard.model.input <- makeStandardAnnualInput(
##' incremental.payments = IncrementalGeneralLiablityTriangle,
##' stoch.inflation.weight = 1,
##' non.stoch.inflation.weight = 0,
##' stoch.inflation.rate = PCE.rate,
##' exp.year.type = 'ay',
##' extra.dev.years = 5,
##' use.skew.t = TRUE)
##' \dontrun{
##' standard.model.output <- runLossDevModel(
##' standard.model.input,
##' burnIn = 30.0E+3,
##' sampleSize = 30.0E+3,
##' thin = 10)
##' mcmcACF(standard.model.output)
##' }
setGenericVerif('mcmcACF',
                function(object) standardGeneric('mcmcACF'))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.