content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/list.count.R
\name{list.count}
\alias{list.count}
\title{Count the number of elements that satisfy given condition}
\usage{
list.count(.data, cond)
}
\arguments{
\item{.data}{A \code{list} or \code{vector}}
\item{cond}{A logical lambda expression for each element of \code{.data} to evaluate. If
\code{cond} is missing then the total number of elements in \code{.data} will be returned.}
}
\value{
An integer that indicates the number of elements with which \code{cond} is evaluated
to be \code{TRUE}.
}
\description{
Count the number of elements that satisfy given condition
}
\examples{
x <- list(p1 = list(type='A',score=list(c1=10,c2=8)),
p2 = list(type='B',score=list(c1=9,c2=9)),
p3 = list(type='B',score=list(c1=9,c2=7)))
list.count(x, type=='B')
list.count(x, min(unlist(score)) >= 9)
}
| /man/list.count.Rd | permissive | paulhendricks/rlist | R | false | false | 897 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/list.count.R
\name{list.count}
\alias{list.count}
\title{Count the number of elements that satisfy given condition}
\usage{
list.count(.data, cond)
}
\arguments{
\item{.data}{A \code{list} or \code{vector}}
\item{cond}{A logical lambda expression for each element of \code{.data} to evaluate. If
\code{cond} is missing then the total number of elements in \code{.data} will be returned.}
}
\value{
An integer that indicates the number of elements with which \code{cond} is evaluated
to be \code{TRUE}.
}
\description{
Count the number of elements that satisfy given condition
}
\examples{
x <- list(p1 = list(type='A',score=list(c1=10,c2=8)),
p2 = list(type='B',score=list(c1=9,c2=9)),
p3 = list(type='B',score=list(c1=9,c2=7)))
list.count(x, type=='B')
list.count(x, min(unlist(score)) >= 9)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Transformation.R
\name{Distribute}
\alias{Distribute}
\title{Find all the distinct ways one can take one element from each input set.}
\usage{
Distribute(input_list)
}
\arguments{
\item{input_list}{a list of driver sets/therapeutic sets}
}
\value{
a list containing every possible combination of one element
}
\description{
Find all the distinct ways one can take one element from each input set.
}
\details{
The function removes identical combinations from each input set
and returns the unique ones such that they are lexicographically sorted
}
| /man/Distribute.Rd | no_license | professorbeautiful/DriverTools | R | false | false | 636 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Transformation.R
\name{Distribute}
\alias{Distribute}
\title{Find all the distinct ways one can take one element from each input set.}
\usage{
Distribute(input_list)
}
\arguments{
\item{input_list}{a list of driver sets/therapeutic sets}
}
\value{
a list containing every possible combination of one element
}
\description{
Find all the distinct ways one can take one element from each input set.
}
\details{
The function removes identical combinations from each input set
and returns the unique ones such that they are lexicographically sorted
}
|
# Compute GO (Biological Process) semantic similarity between each candidate
# gene and its neighbours in the MS PPI network; one summed score per gene is
# written to gene27_gosim.csv.
# date: 2019/6/20
# author: yj
setwd("D:/Science/MS/data/MS_PPI") # working directory with the input files
library(igraph)
# STRING edge list (61 + 162 proteins), tab-separated; the first two columns
# hold the interacting protein pair.
edge61_162 <- read.csv("string_interactions 61+162.tsv", header = TRUE, sep = "\t")
ms <- make_graph(t(edge61_162[, 1:2]), directed = FALSE)
gene26 <- read.csv("gene26.txt", header = FALSE)
gene26a <- as.vector(t(gene26)) # the 26 candidate genes as a plain vector
gene27 <- read.csv("gene27.txt", header = FALSE)
# install.packages("GOSemSim")
library(GOSemSim)
hsGO <- godata('org.Hs.eg.db', ont = "BP") # IC-based annotation (not used below)
hsGO2 <- godata('org.Hs.eg.db', keytype = "SYMBOL", ont = "BP", computeIC = FALSE)
gene27_gosim <- gene27
# Special case: TNFAIP3. Remove all 26 candidate genes from the network, then
# score TNFAIP3 against its remaining neighbours.
assign(paste("MS", "TNFAIP3", sep = "_"), ms - vertices(gene26a))
neib_TNFAIP3 <- names(neighbors(MS_TNFAIP3, "TNFAIP3")) # vertex names are the neighbours
sim <- mgeneSim(c("TNFAIP3", neib_TNFAIP3), semData = hsGO2, measure = "Wang")
gene27_gosim[1, 2] <- sum(sim[1, ]) - 1 # subtract the self-similarity (always 1)
for (i in seq_along(gene26a)) {
  # Drop the other 25 candidate genes, keeping gene i in the network.
  ms_minus <- ms - vertices(gene26a[-i])
  # BUG FIX: the original assigned the undefined object `ms_final` here, which
  # aborted the loop on its first iteration; store the pruned graph instead.
  assign(paste("MS", gene26a[i], sep = "_"), ms_minus)
  if (degree(ms_minus, gene26a[i]) == 0) {
    # Gene i is isolated after pruning: nothing to compare against.
    gene27_gosim[i + 1, 2] <- NA
  } else {
    neib <- names(neighbors(ms_minus, c(gene26a[i])))
    sim <- mgeneSim(c(gene26a[i], neib), semData = hsGO2, measure = "Wang")
    gene27_gosim[i + 1, 2] <- sum(sim[1, ]) - 1 # subtract self-similarity
  }
}
write.csv(gene27_gosim, "gene27_gosim.csv")
| /gosim.R | no_license | windforclouds/PS-V2N | R | false | false | 1,225 | r | #计算gene GO语义相似性
# Compute GO (Biological Process) semantic similarity between each candidate
# gene and its neighbours in the MS PPI network; one summed score per gene is
# written to gene27_gosim.csv.
# date: 2019/6/20
# author: yj
setwd("D:/Science/MS/data/MS_PPI") # working directory with the input files
library(igraph)
# STRING edge list (61 + 162 proteins), tab-separated; the first two columns
# hold the interacting protein pair.
edge61_162 <- read.csv("string_interactions 61+162.tsv", header = TRUE, sep = "\t")
ms <- make_graph(t(edge61_162[, 1:2]), directed = FALSE)
gene26 <- read.csv("gene26.txt", header = FALSE)
gene26a <- as.vector(t(gene26)) # the 26 candidate genes as a plain vector
gene27 <- read.csv("gene27.txt", header = FALSE)
# install.packages("GOSemSim")
library(GOSemSim)
hsGO <- godata('org.Hs.eg.db', ont = "BP") # IC-based annotation (not used below)
hsGO2 <- godata('org.Hs.eg.db', keytype = "SYMBOL", ont = "BP", computeIC = FALSE)
gene27_gosim <- gene27
# Special case: TNFAIP3. Remove all 26 candidate genes from the network, then
# score TNFAIP3 against its remaining neighbours.
assign(paste("MS", "TNFAIP3", sep = "_"), ms - vertices(gene26a))
neib_TNFAIP3 <- names(neighbors(MS_TNFAIP3, "TNFAIP3")) # vertex names are the neighbours
sim <- mgeneSim(c("TNFAIP3", neib_TNFAIP3), semData = hsGO2, measure = "Wang")
gene27_gosim[1, 2] <- sum(sim[1, ]) - 1 # subtract the self-similarity (always 1)
for (i in seq_along(gene26a)) {
  # Drop the other 25 candidate genes, keeping gene i in the network.
  ms_minus <- ms - vertices(gene26a[-i])
  # BUG FIX: the original assigned the undefined object `ms_final` here, which
  # aborted the loop on its first iteration; store the pruned graph instead.
  assign(paste("MS", gene26a[i], sep = "_"), ms_minus)
  if (degree(ms_minus, gene26a[i]) == 0) {
    # Gene i is isolated after pruning: nothing to compare against.
    gene27_gosim[i + 1, 2] <- NA
  } else {
    neib <- names(neighbors(ms_minus, c(gene26a[i])))
    sim <- mgeneSim(c(gene26a[i], neib), semData = hsGO2, measure = "Wang")
    gene27_gosim[i + 1, 2] <- sum(sim[1, ]) - 1 # subtract self-similarity
  }
}
write.csv(gene27_gosim, "gene27_gosim.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sfcr_validate.R
\name{.abort_water_leakc}
\alias{.abort_water_leakc}
\title{Abort if column validation is not fulfilled}
\usage{
.abort_water_leakc(c2names, which)
}
\arguments{
\item{c2names}{Names of offending columns}
\item{which}{Balance-sheet or transactions-flow matrix?}
}
\description{
Abort if column validation is not fulfilled
}
\author{
João Macalós
}
\keyword{internal}
| /man/dot-abort_water_leakc.Rd | permissive | markushlang/sfcr | R | false | true | 464 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sfcr_validate.R
\name{.abort_water_leakc}
\alias{.abort_water_leakc}
\title{Abort if column validation is not fulfilled}
\usage{
.abort_water_leakc(c2names, which)
}
\arguments{
\item{c2names}{Names of offending columns}
\item{which}{Balance-sheet or transactions-flow matrix?}
}
\description{
Abort if column validation is not fulfilled
}
\author{
João Macalós
}
\keyword{internal}
|
# Appears to compute a response probability for a multidimensional
# normal-ogive item, using the logistic approximation
# exp(1.702*z) / (1 + exp(1.702*z)).
#
# Arguments (inferred from usage -- TODO confirm against the package's callers):
#   thres : item threshold parameter(s)
#   alpha : discrimination/loading vector for the item
#   theta : latent trait score(s) of the respondent
#   vari  : variance term added to the residual variance in the denominator
#   vres  : residual variance term
#
# NOTE(review): `transpose()` is not a base R function; presumably a package
# helper equivalent to t() -- confirm it is defined elsewhere in the package.
# NOTE(review): the function ends with an assignment, so `d` is returned
# invisibly; an explicit final `d` would make the return value explicit.
pcongmul <- function(thres, alpha, theta, vari, vres){
# distance between the threshold and the model-implied response
nume <- thres - (alpha %*% theta)
# norm-like scaling of the loading vector (sqrt(alpha %*% t(alpha)) if transpose() == t())
tmp1 <- sqrt(alpha %*% transpose(alpha))
# full denominator: loading norm scaled by the total standard deviation
tmp2 <- tmp1 * (sqrt(vari+vres))
# standardized, negated distance
tmp3 <- (-1) * (nume/tmp2)
# 1.702 scaling makes the logistic curve approximate the normal ogive
n <- exp(1.702*tmp3)
# logistic transform -> value in (0, 1); returned invisibly
d <- n / (1+n)
}
| /R/pcongmul.R | no_license | cran/InDisc | R | false | false | 254 | r | pcongmul <- function(thres, alpha, theta, vari, vres){
nume <- thres - (alpha %*% theta)
tmp1 <- sqrt(alpha %*% transpose(alpha))
tmp2 <- tmp1 * (sqrt(vari+vres))
tmp3 <- (-1) * (nume/tmp2)
n <- exp(1.702*tmp3)
d <- n / (1+n)
}
|
# run this script by typing in your terminal:
#R CMD BATCH 1.Subsampling_GridSearch_Post.R > 1.Subsampling_GridSearch_Post.R.out
## Create output dirs (results/ and results/RoutFiles/ under the working directory)
system(paste0("mkdir ", getwd(), "/results"))
system(paste0("mkdir ", getwd(), "/results/RoutFiles"))
## Load libraries and functions
library(ape)
library(partitions)
library(phrapl)
## migrationArray: pre-built list of candidate 2-population migration models (3 free parameters)
load(url("https://github.com/ariadnamorales/phrapl-manual/raw/master/data/sensitivityAnalyses/example/input/MigrationArray_2pop_3K.rda"))
## Population assignments and best gene tree for the Plethodon example data.
## NOTE(review): `head=TRUE` relies on partial matching of read.table's `header` argument.
currentAssign<-read.table(file="https://raw.githubusercontent.com/ariadnamorales/phrapl-manual/master/data/sensitivityAnalyses/example_with_outputFiles/input/Pleth_align.txt", head=TRUE)
currentTrees<-ape::read.tree("https://raw.githubusercontent.com/ariadnamorales/phrapl-manual/master/data/sensitivityAnalyses/example_with_outputFiles/input/Pleth_bestTree.tre")
########################
### 1. Subsampling ####
########################
## Define arguments
subsamplesPerGene<-10 # subsampling replicates per locus
nloci<-5 # number of loci
popAssignments<-list(c(3,3)) # individuals subsampled per population
## Do subsampling
observedTrees<-PrepSubsampling(assignmentsGlobal=currentAssign,observedTrees=currentTrees,
popAssignments=popAssignments,subsamplesPerGene=subsamplesPerGene,outgroup=FALSE,outgroupPrune=FALSE)
### You will see this error message:
### Warning messages:
# 1: In PrepSubsampling(assignmentsGlobal = currentAssign, observedTrees = currentTrees, :
# Warning: Tree number 1 contains tip names not included in the inputted assignment file. These tips will not be subsampled.
### This is because we exclude a few individuals from the Assignment File to reduce computational time for the sake of this tutorial.
## Get subsample weights for phrapl
subsampleWeights.df<-GetPermutationWeightsAcrossSubsamples(popAssignments=popAssignments,observedTrees=observedTrees)
## Save subsampled Trees and weights
save(list=c("observedTrees","subsampleWeights.df"),file=paste0(getwd(),"/phraplInput_Pleth.rda"))
######################
### 2. GridSearch ###
######################
## Search details
modelRange<-c(1:5) # candidate model indices into migrationArray
popAssignments<-list(c(3,3))
nTrees<-100 # trees simulated per parameter combination
subsamplesPerGene<-10
totalPopVector<-list(c(4,4)) ## total number of indvs per pop
popScaling<-c(0.25, 1, 1, 1, 1) ## IMPORTANT: one of these loci is haploid
## Run search and keep track of the time
startTime<-as.numeric(Sys.time())
result<-GridSearch(modelRange=modelRange,
migrationArray=migrationArray,
popAssignments=popAssignments,
nTrees=nTrees,
observedTree=observedTrees,
subsampleWeights.df=subsampleWeights.df,
print.ms.string=TRUE,
print.results=TRUE,
debug=TRUE,return.all=TRUE,
collapseStarts=c(0.30,0.58,1.11,2.12,4.07),
migrationStarts=c(0.10,0.22,0.46,1.00,2.15),
subsamplesPerGene=subsamplesPerGene,
totalPopVector=totalPopVector,
popScaling=popScaling, ## IMPORTANT: one of these loci is haploid
print.matches=TRUE)
# Grid list for Rout file
gridList<-result[[1]]
# Get elapsed time
stopTime<-as.numeric(Sys.time()) #stop system timer
elapsedSecs<-stopTime - startTime #elapsed time in seconds
elapsedHrs<-(elapsedSecs / 60) / 60 #convert to hours
elapsedDays<-elapsedHrs / 24 #convert to days
# Save the whole workspace, then move/remove the batch log files
save(list=ls(), file=paste0(getwd(),"/results/Pleth_",min(modelRange),"_",max(modelRange),".rda"))
system(paste0("mv ", getwd(), "/1.Subsampling_GridSearch_Post.Rout ", getwd(), "/results/RoutFiles/1.Subsampling_GridSearch_Post.Rout"))
system(paste0("rm ", getwd(), "/1.Subsampling_GridSearch_Post.R.out"))
| /data/exampleData/1.Subsampling_GridSearch_Post.R | no_license | ariadnamorales/phrapl-manual | R | false | false | 3,497 | r | # run this script typing in your terinal:
#R CMD BATCH 1.Subsampling_GridSearch_Post.R > 1.Subsampling_GridSearch_Post.R.out
## Create output dirs (results/ and results/RoutFiles/ under the working directory)
system(paste0("mkdir ", getwd(), "/results"))
system(paste0("mkdir ", getwd(), "/results/RoutFiles"))
## Load libraries and functions
library(ape)
library(partitions)
library(phrapl)
## migrationArray: pre-built list of candidate 2-population migration models (3 free parameters)
load(url("https://github.com/ariadnamorales/phrapl-manual/raw/master/data/sensitivityAnalyses/example/input/MigrationArray_2pop_3K.rda"))
## Population assignments and best gene tree for the Plethodon example data.
## NOTE(review): `head=TRUE` relies on partial matching of read.table's `header` argument.
currentAssign<-read.table(file="https://raw.githubusercontent.com/ariadnamorales/phrapl-manual/master/data/sensitivityAnalyses/example_with_outputFiles/input/Pleth_align.txt", head=TRUE)
currentTrees<-ape::read.tree("https://raw.githubusercontent.com/ariadnamorales/phrapl-manual/master/data/sensitivityAnalyses/example_with_outputFiles/input/Pleth_bestTree.tre")
########################
### 1. Subsampling ####
########################
## Define arguments
subsamplesPerGene<-10 # subsampling replicates per locus
nloci<-5 # number of loci
popAssignments<-list(c(3,3)) # individuals subsampled per population
## Do subsampling
observedTrees<-PrepSubsampling(assignmentsGlobal=currentAssign,observedTrees=currentTrees,
popAssignments=popAssignments,subsamplesPerGene=subsamplesPerGene,outgroup=FALSE,outgroupPrune=FALSE)
### You will see this error message:
### Warning messages:
# 1: In PrepSubsampling(assignmentsGlobal = currentAssign, observedTrees = currentTrees, :
# Warning: Tree number 1 contains tip names not included in the inputted assignment file. These tips will not be subsampled.
### This is because we exclude a few individuals from the Assignment File to reduce computational time for the sake of this tutorial.
## Get subsample weights for phrapl
subsampleWeights.df<-GetPermutationWeightsAcrossSubsamples(popAssignments=popAssignments,observedTrees=observedTrees)
## Save subsampled Trees and weights
save(list=c("observedTrees","subsampleWeights.df"),file=paste0(getwd(),"/phraplInput_Pleth.rda"))
######################
### 2. GridSearch ###
######################
## Search details
modelRange<-c(1:5) # candidate model indices into migrationArray
popAssignments<-list(c(3,3))
nTrees<-100 # trees simulated per parameter combination
subsamplesPerGene<-10
totalPopVector<-list(c(4,4)) ## total number of indvs per pop
popScaling<-c(0.25, 1, 1, 1, 1) ## IMPORTANT: one of these loci is haploid
## Run search and keep track of the time
startTime<-as.numeric(Sys.time())
result<-GridSearch(modelRange=modelRange,
migrationArray=migrationArray,
popAssignments=popAssignments,
nTrees=nTrees,
observedTree=observedTrees,
subsampleWeights.df=subsampleWeights.df,
print.ms.string=TRUE,
print.results=TRUE,
debug=TRUE,return.all=TRUE,
collapseStarts=c(0.30,0.58,1.11,2.12,4.07),
migrationStarts=c(0.10,0.22,0.46,1.00,2.15),
subsamplesPerGene=subsamplesPerGene,
totalPopVector=totalPopVector,
popScaling=popScaling, ## IMPORTANT: one of these loci is haploid
print.matches=TRUE)
# Grid list for Rout file
gridList<-result[[1]]
# Get elapsed time
stopTime<-as.numeric(Sys.time()) #stop system timer
elapsedSecs<-stopTime - startTime #elapsed time in seconds
elapsedHrs<-(elapsedSecs / 60) / 60 #convert to hours
elapsedDays<-elapsedHrs / 24 #convert to days
# Save the whole workspace, then move/remove the batch log files
save(list=ls(), file=paste0(getwd(),"/results/Pleth_",min(modelRange),"_",max(modelRange),".rda"))
system(paste0("mv ", getwd(), "/1.Subsampling_GridSearch_Post.Rout ", getwd(), "/results/RoutFiles/1.Subsampling_GridSearch_Post.Rout"))
system(paste0("rm ", getwd(), "/1.Subsampling_GridSearch_Post.R.out"))
|
# Auto-extracted example code for weights.enetLTS() from the enetLTS package
# (robust elastic-net estimator based on least trimmed squares).
# The "## No test:" / "## Don't show:" markers are R CMD check directives
# carried over from the original .Rd examples section.
library(enetLTS)
### Name: weights.enetLTS
### Title: binary weights from the '"enetLTS"' object
### Aliases: weights.enetLTS
### Keywords: regression classification
### ** Examples
## for gaussian: simulate a sparse linear model with 10% contamination
set.seed(86)
n <- 100; p <- 25 # number of observations and variables
beta <- rep(0,p); beta[1:6] <- 1 # sparse coefficient vector (6 nonzero entries)
sigma <- 0.5 # controls signal-to-noise ratio
x <- matrix(rnorm(n*p, sigma),nrow=n)
e <- rnorm(n,0,1) # error terms
eps <- 0.1 # contamination level
m <- ceiling(eps*n) # observations to be contaminated
eout <- e; eout[1:m] <- eout[1:m] + 10 # vertical outliers
yout <- c(x %*% beta + sigma * eout) # response
xout <- x; xout[1:m,] <- xout[1:m,] + 10 # bad leverage points
## No test:
fit1 <- enetLTS(xout,yout,alphas=0.5,lambdas=0.05,plot=FALSE)
weights(fit1)
weights(fit1,vers="raw",index=TRUE)
weights(fit1,vers="both",index=TRUE)
## End(No test)
## for binomial
eps <-0.05 # contamination applied only to class 0
m <- ceiling(eps*n)
y <- sample(0:1,n,replace=TRUE)
xout <- x
xout[y==0,][1:m,] <- xout[1:m,] + 10; # shift some class-0 rows (bad leverage points)
yout <- y # wrong classification for vertical outliers
## Don't show:
# NOTE(review): this hidden block re-simulates a much smaller data set and
# overwrites xout/yout, so the binomial fits below use these data rather than
# the class-contaminated data built just above.
set.seed(86)
n <- 5; p <- 15
beta <- rep(0,p); beta[1:6] <- 1
sigma <- 0.5
x <- matrix(rnorm(n*p, sigma),nrow=n)
e <- rnorm(n,0,1) # error terms
eps <- 0.1 # contamination level
m <- ceiling(eps*n) # observations to be contaminated
eout <- e; eout[1:m] <- eout[1:m] + 10 # vertical outliers
yout <- c(x %*% beta + sigma * eout) # response
xout <- x; xout[1:m,] <- xout[1:m,] + 10 # bad leverage points
fit2 <- enetLTS(xout,yout,alphas=0.5,lambdas=0.05,plot=FALSE)
weights(fit2)
## End(Don't show)
## No test:
fit2 <- enetLTS(xout,yout,family="binomial",alphas=0.5,lambdas=0.05,plot=FALSE)
weights(fit2)
weights(fit2,vers="raw",index=TRUE)
weights(fit2,vers="both",index=TRUE)
## End(No test)
| /data/genthat_extracted_code/enetLTS/examples/weights.enetLTS.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,274 | r | library(enetLTS)
# Auto-extracted example code for weights.enetLTS() from the enetLTS package
# (robust elastic-net estimator based on least trimmed squares).
# The "## No test:" / "## Don't show:" markers are R CMD check directives
# carried over from the original .Rd examples section.
### Name: weights.enetLTS
### Title: binary weights from the '"enetLTS"' object
### Aliases: weights.enetLTS
### Keywords: regression classification
### ** Examples
## for gaussian: simulate a sparse linear model with 10% contamination
set.seed(86)
n <- 100; p <- 25 # number of observations and variables
beta <- rep(0,p); beta[1:6] <- 1 # sparse coefficient vector (6 nonzero entries)
sigma <- 0.5 # controls signal-to-noise ratio
x <- matrix(rnorm(n*p, sigma),nrow=n)
e <- rnorm(n,0,1) # error terms
eps <- 0.1 # contamination level
m <- ceiling(eps*n) # observations to be contaminated
eout <- e; eout[1:m] <- eout[1:m] + 10 # vertical outliers
yout <- c(x %*% beta + sigma * eout) # response
xout <- x; xout[1:m,] <- xout[1:m,] + 10 # bad leverage points
## No test:
fit1 <- enetLTS(xout,yout,alphas=0.5,lambdas=0.05,plot=FALSE)
weights(fit1)
weights(fit1,vers="raw",index=TRUE)
weights(fit1,vers="both",index=TRUE)
## End(No test)
## for binomial
eps <-0.05 # contamination applied only to class 0
m <- ceiling(eps*n)
y <- sample(0:1,n,replace=TRUE)
xout <- x
xout[y==0,][1:m,] <- xout[1:m,] + 10; # shift some class-0 rows (bad leverage points)
yout <- y # wrong classification for vertical outliers
## Don't show:
# NOTE(review): this hidden block re-simulates a much smaller data set and
# overwrites xout/yout, so the binomial fits below use these data rather than
# the class-contaminated data built just above.
set.seed(86)
n <- 5; p <- 15
beta <- rep(0,p); beta[1:6] <- 1
sigma <- 0.5
x <- matrix(rnorm(n*p, sigma),nrow=n)
e <- rnorm(n,0,1) # error terms
eps <- 0.1 # contamination level
m <- ceiling(eps*n) # observations to be contaminated
eout <- e; eout[1:m] <- eout[1:m] + 10 # vertical outliers
yout <- c(x %*% beta + sigma * eout) # response
xout <- x; xout[1:m,] <- xout[1:m,] + 10 # bad leverage points
fit2 <- enetLTS(xout,yout,alphas=0.5,lambdas=0.05,plot=FALSE)
weights(fit2)
## End(Don't show)
## No test:
fit2 <- enetLTS(xout,yout,family="binomial",alphas=0.5,lambdas=0.05,plot=FALSE)
weights(fit2)
weights(fit2,vers="raw",index=TRUE)
weights(fit2,vers="both",index=TRUE)
## End(No test)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_datasets.R
\name{CompareToReferenceDataset}
\alias{CompareToReferenceDataset}
\title{Test if column is identical with reference dataset}
\usage{
CompareToReferenceDataset(
tocompareset,
refset,
name,
sortcol = "Plot",
tolerance = 1e-04
)
}
\arguments{
\item{tocompareset}{name of the dataset which is compared (dataset 1). Is a data.table.}
\item{refset}{name of the reference dataset (dataset 2). This dataset can be much larger
than \code{tocompareset}, only the two columns of interest are filtered out.
Is a data.table.}
\item{name}{names of the columns which will be compared.}
\item{sortcol}{name of the column which is used to sort rows}
}
\value{
Boolean value which indicates wheter the columns are identical or not.
}
\description{
Tests whether the column given is identical with another column from a reference dataset.
The order of rows is not identical in both columns, therefore a simple comparison with
\code{identical()} is not possible.
The columns compared have the same name "name".
Both datasets have a column (e.g. with the name "Plot"), by which they can be sorted.
Is used in BetaDivMultifun to test whether the dataset content is identical with
the Synthesis dataset "Info_data_EP grass functions & services.xlsx", BExIS number
}
| /man/CompareToReferenceDataset.Rd | permissive | allanecology/BetaDivMultifun | R | false | true | 1,354 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_datasets.R
\name{CompareToReferenceDataset}
\alias{CompareToReferenceDataset}
\title{Test if column is identical with reference dataset}
\usage{
CompareToReferenceDataset(
tocompareset,
refset,
name,
sortcol = "Plot",
tolerance = 1e-04
)
}
\arguments{
\item{tocompareset}{name of the dataset which is compared (dataset 1). Is a data.table.}
\item{refset}{name of the reference dataset (dataset 2). This dataset can be much larger
than \code{tocompareset}, only the two columns of interest are filtered out.
Is a data.table.}
\item{name}{names of the columns which will be compared.}
\item{sortcol}{name of the column which is used to sort rows}
}
\value{
Boolean value which indicates wheter the columns are identical or not.
}
\description{
Tests whether the column given is identical with another column from a reference dataset.
The order of rows is not identical in both columns, therefore a simple comparison with
\code{identical()} is not possible.
The columns compared have the same name "name".
Both datasets have a column (e.g. with the name "Plot"), by which they can be sorted.
Is used in BetaDivMultifun to test whether the dataset content is identical with
the Synthesis dataset "Info_data_EP grass functions & services.xlsx", BExIS number
}
|
# ================================================
# Step 1 -
# merge nmme data and create usable files
# S. Baker, July 2017
# hasnt been updated to run on hydrofcst...
# ================================================
rm(list=ls())
## Load libraries
library(ncdf4)
library(dplyr)
## Directories
dir_in = '/home/sabaker/s2s/nmme/files/HUC_hcst/' # hindcast netCDFs for most models
dir_in_NASA = '/d2/hydrofcst/s2s/nmme_processing/HUC_fcst_iri/' # NASA model files live elsewhere
#dir_out = '/home/sabaker/s2s/nmme/files/R_output/'
dir_out = '/d2/hydrofcst/s2s/nmme_processing/R_output/'
## Input data
var = c('prate', 'tmp2m') # precipitation rate and 2-m temperature
fcsts = c('01','02','03','04','05','06','07','08','09','10','11','12') # initialization months
models = c('CFSv2', 'CMC1', 'CMC2', 'GFDL', 'GFDL_FLOR', 'NASA-GEOSS2S', 'NCAR_CCSM4')
### Read and save data (takes ~8-9 minutes)
beg_time = Sys.time()
## === Variable loop
for (i in 1:2) {
print(var[i])
df_all = NULL # accumulates all models for this variable
# naming for NASA model (NASA files use 'prec'/'tref' in their filenames)
if (var[i] == 'prate') { var_j = 'prec' }
if (var[i] == 'tmp2m') { var_j = 'tref' }
## === Model loop
for (k in 1:length(models)) {
print(models[k])
df_model = NULL # accumulates rows for this model
## === NMME models with different file formats and locations (most files in sabaker dir)
## NOTE(review): growing df_model/df_all with rbind() inside nested loops is
## O(n^2); collecting rows in a list and rbind-ing once would be faster.
if (models[k] != 'NASA-GEOSS2S') {
setwd(paste0(dir_in, var[i]))
## === Month loop (forecast initialization month)
for (j in 1:12) {
## === Year loop (m = years since 1982; hindcast covers 1982-2016)
for (m in 0:34) {
## read netcdf
file = paste0(var[i],'.',fcsts[j],'0100.ensmean.',models[k],'.fcst.198201-201612.1x1.ITDIM-',m,'.nc')
nc_temp = nc_open(file)
## read variables & combine
var_raw = ncvar_get(nc_temp, var[i]) # [hru (x), timestep (y)]
hru_vec = ncvar_get(nc_temp, 'hru') # ncvar_get works on dimensions as well as variables
yr = 1982 + m
# cbind with character columns coerces the whole row block to a character matrix
df = cbind(var[i], models[k], yr, j, hru_vec, var_raw)
## merge with previous data
df_model = rbind(df_model, df)
## print errors with files - (makes script slower!)
#r = range(df[,6])
#if (var[i] == 'prate' & (max(as.numeric(r)) > 50 | min(as.numeric(r)) < 0)) { print(paste(r,file)) }
#if (var[i] == 'tmp2m' & (max(as.numeric(r)) > 400 | min(as.numeric(r)) < 200)) { print(r,file)) }
}
## print max and min in model/var combo (sanity check on the lead-0 column)
r = range(df_model[,6])
if (var[i] == 'prate' & (max(as.numeric(r)) > 50 | min(as.numeric(r)) < 0)) { print(r) }
if (var[i] == 'tmp2m' & (max(as.numeric(r)) > 400 | min(as.numeric(r)) < 200)) { print(r) }
}
} else {
## === NASA model - processed on hydrofcst
setwd(dir_in_NASA)
### === loop through monthly timesteps for NASA model
# j counts months since Jan 1960 (zero-based): 264 = Jan 1982, 683 = Dec 2016
for ( j in 264:683 ) { #253:684
# read netcdf
file = paste0(models[k], '.', var_j, '.', j, '.nc')
nc_temp = nc_open(file)
## read variables & combine
var_raw = ncvar_get(nc_temp, var[i]) # [hru (x), timestep (y)]
hru_vec = ncvar_get(nc_temp, 'hru') # ncvar_get works on dimensions as well as variables
## get month and year from the month counter
yr_j = floor(j/12)
mon_j = j - yr_j*12 +1
yr = 1960 + yr_j
df = cbind(var[i], models[k], yr, mon_j, hru_vec, var_raw)
## merge with previous data
df_model = rbind(df_model, df[,1:13]) # keep only to lead 7
}
}
setwd(dir_out)
colnames(df_model) <- c('var', 'mdl', 'yr', 'mon', 'hru', 'lead 0', 'lead 1', 'lead 2', 'lead 3', 'lead 4', 'lead 5', 'lead 6', 'lead 7')
saveRDS(df_model, file = paste0(var[i],'_',models[k],'_ensmean.rds'))
df_all = rbind(df_all, df_model)
}
saveRDS(df_all, file = paste0(var[i],'_NMME_ensmean_198201-201612.rds'))
}
Sys.time() - beg_time # total time to run
# save entire data set
#saveRDS(df_all, file = 'NMME_ensmean_198201-201612.rds') | /scripts/nmme_scripts/hcst_scripts/1_merge_nmme_hcst_iri.R | no_license | jemsethio/S2S-Climate-Forecasts-for-Watersheds | R | false | false | 3,977 | r | # ================================================
# Step 1 -
# merge nmme data and create usable files
# S. Baker, July 2017
# hasnt been updated to run on hydrofcst...
# ================================================
rm(list=ls())
## Load libraries
library(ncdf4)
library(dplyr)
## Directories
dir_in = '/home/sabaker/s2s/nmme/files/HUC_hcst/' # hindcast netCDFs for most models
dir_in_NASA = '/d2/hydrofcst/s2s/nmme_processing/HUC_fcst_iri/' # NASA model files live elsewhere
#dir_out = '/home/sabaker/s2s/nmme/files/R_output/'
dir_out = '/d2/hydrofcst/s2s/nmme_processing/R_output/'
## Input data
var = c('prate', 'tmp2m') # precipitation rate and 2-m temperature
fcsts = c('01','02','03','04','05','06','07','08','09','10','11','12') # initialization months
models = c('CFSv2', 'CMC1', 'CMC2', 'GFDL', 'GFDL_FLOR', 'NASA-GEOSS2S', 'NCAR_CCSM4')
### Read and save data (takes ~8-9 minutes)
beg_time = Sys.time()
## === Variable loop
for (i in 1:2) {
print(var[i])
df_all = NULL # accumulates all models for this variable
# naming for NASA model (NASA files use 'prec'/'tref' in their filenames)
if (var[i] == 'prate') { var_j = 'prec' }
if (var[i] == 'tmp2m') { var_j = 'tref' }
## === Model loop
for (k in 1:length(models)) {
print(models[k])
df_model = NULL # accumulates rows for this model
## === NMME models with different file formats and locations (most files in sabaker dir)
## NOTE(review): growing df_model/df_all with rbind() inside nested loops is
## O(n^2); collecting rows in a list and rbind-ing once would be faster.
if (models[k] != 'NASA-GEOSS2S') {
setwd(paste0(dir_in, var[i]))
## === Month loop (forecast initialization month)
for (j in 1:12) {
## === Year loop (m = years since 1982; hindcast covers 1982-2016)
for (m in 0:34) {
## read netcdf
file = paste0(var[i],'.',fcsts[j],'0100.ensmean.',models[k],'.fcst.198201-201612.1x1.ITDIM-',m,'.nc')
nc_temp = nc_open(file)
## read variables & combine
var_raw = ncvar_get(nc_temp, var[i]) # [hru (x), timestep (y)]
hru_vec = ncvar_get(nc_temp, 'hru') # ncvar_get works on dimensions as well as variables
yr = 1982 + m
# cbind with character columns coerces the whole row block to a character matrix
df = cbind(var[i], models[k], yr, j, hru_vec, var_raw)
## merge with previous data
df_model = rbind(df_model, df)
## print errors with files - (makes script slower!)
#r = range(df[,6])
#if (var[i] == 'prate' & (max(as.numeric(r)) > 50 | min(as.numeric(r)) < 0)) { print(paste(r,file)) }
#if (var[i] == 'tmp2m' & (max(as.numeric(r)) > 400 | min(as.numeric(r)) < 200)) { print(paste(r,file)) }
}
## print max and min in model/var combo (sanity check on the lead-0 column)
r = range(df_model[,6])
if (var[i] == 'prate' & (max(as.numeric(r)) > 50 | min(as.numeric(r)) < 0)) { print(r) }
if (var[i] == 'tmp2m' & (max(as.numeric(r)) > 400 | min(as.numeric(r)) < 200)) { print(r) }
}
} else {
## === NASA model - processed on hydrofcst
setwd(dir_in_NASA)
### === loop through monthly timesteps for NASA model
# j counts months since Jan 1960 (zero-based): 264 = Jan 1982, 683 = Dec 2016
for ( j in 264:683 ) { #253:684
# read netcdf
file = paste0(models[k], '.', var_j, '.', j, '.nc')
nc_temp = nc_open(file)
## read variables & combine
var_raw = ncvar_get(nc_temp, var[i]) # [hru (x), timestep (y)]
hru_vec = ncvar_get(nc_temp, 'hru') # ncvar_get works on dimensions as well as variables
## get month and year from the month counter
yr_j = floor(j/12)
mon_j = j - yr_j*12 +1
yr = 1960 + yr_j
df = cbind(var[i], models[k], yr, mon_j, hru_vec, var_raw)
## merge with previous data
df_model = rbind(df_model, df[,1:13]) # keep only to lead 7
}
}
setwd(dir_out)
colnames(df_model) <- c('var', 'mdl', 'yr', 'mon', 'hru', 'lead 0', 'lead 1', 'lead 2', 'lead 3', 'lead 4', 'lead 5', 'lead 6', 'lead 7')
saveRDS(df_model, file = paste0(var[i],'_',models[k],'_ensmean.rds'))
df_all = rbind(df_all, df_model)
}
saveRDS(df_all, file = paste0(var[i],'_NMME_ensmean_198201-201612.rds'))
}
Sys.time() - beg_time # total time to run
# save entire data set
#saveRDS(df_all, file = 'NMME_ensmean_198201-201612.rds') |
# plot2.R
# Created: 1/9/2016
# -----------------------------------------------------------------------------
# Purpose: For completion of Coursera Class
# Exploratory Data Analysis
# Objective:
# 1) read in 20MB file household_power_consumption.txt
# 2) Create line chart of Global Active Power (kilowatts) vs. time
# png File: Width of 480px and Height 480px.
# -----------------------------------------------------------------------------
# Input File: household_power_consumption.txt
# Date: Date in format dd/mm/yyyy
# Time: time in format hh:mm:ss
# Global_active_power: household global minute-averaged active power (in kilowatt)
# Global_reactive_power: household global minute-averaged reactive power (in kilowatt)
# Voltage: minute-averaged voltage (in volt)
# Global_intensity: household global minute-averaged current intensity (in ampere)
# Sub_metering_1: energy sub-metering No. 1 (in watt-hour of active energy).
# It corresponds to the kitchen, containing mainly a dishwasher, an oven and a
# microwave (hot plates are not electric but gas powered).
# Sub_metering_2: energy sub-metering No. 2 (in watt-hour of active energy).
# It corresponds to the laundry room, containing a washing-machine, a tumble-drier,
# a refrigerator and a light.
# Sub_metering_3: energy sub-metering No. 3 (in watt-hour of active energy).
# It corresponds to an electric water-heater and an air-conditioner.
# -----------------------------------------------------------------------------
# Additional Notes:
# - The dataset has 2,075,259 rows and 9 columns.
# - We will only be using data from the dates 2007-02-01 and 2007-02-02.
# - missing values are coded as ?
# Read the full power-consumption file, build a timestamp, keep two days of
# data, and draw Global Active Power over time into plot2.png (480 x 480 px,
# the png() device default). Fields are ';'-separated; '?' marks missing values.
library(data.table)
power_file <- "household_power_consumption.txt"
power <- fread(power_file, sep = ";", na.strings = "?")
# Combine the Date and Time text columns into a single POSIXct timestamp.
power$DATETIME <- as.POSIXct(paste(power$Date, power$Time), format = "%d/%m/%Y %H:%M:%S")
# Parse the Date column, then keep only 2007-02-01 and 2007-02-02.
power$Date <- as.Date(power$Date, "%d/%m/%Y")
keep_days <- as.Date(c("2007-02-01", "2007-02-02"), format = "%Y-%m-%d")
power <- power[power$Date %in% keep_days, ]
# Draw an empty frame first, then add the line, writing to the PNG device.
png("plot2.png")
plot(power$DATETIME, power$Global_active_power, type = "n",
     ylab = "Global Active Power (kilowatts)", xlab = "")
lines(power$DATETIME, power$Global_active_power)
dev.off()
| /plot2.R | no_license | cjparisi/ExData_Plotting1 | R | false | false | 2,388 | r | # plot2.R
# Created: 1/9/2016
# -----------------------------------------------------------------------------
# Purpose: For completion of Coursera Class
# Exploratory Data Analysis
# Objective:
# 1) read in 20MB file household_power_consumption.txt
# 2) Create line chart of Global Active Power (kilowatts) vs. time
# png File: Width of 480px and Height 480px.
# -----------------------------------------------------------------------------
# Input File: household_power_consumption.txt
# Date: Date in format dd/mm/yyyy
# Time: time in format hh:mm:ss
# Global_active_power: household global minute-averaged active power (in kilowatt)
# Global_reactive_power: household global minute-averaged reactive power (in kilowatt)
# Voltage: minute-averaged voltage (in volt)
# Global_intensity: household global minute-averaged current intensity (in ampere)
# Sub_metering_1: energy sub-metering No. 1 (in watt-hour of active energy).
# It corresponds to the kitchen, containing mainly a dishwasher, an oven and a
# microwave (hot plates are not electric but gas powered).
# Sub_metering_2: energy sub-metering No. 2 (in watt-hour of active energy).
# It corresponds to the laundry room, containing a washing-machine, a tumble-drier,
# a refrigerator and a light.
# Sub_metering_3: energy sub-metering No. 3 (in watt-hour of active energy).
# It corresponds to an electric water-heater and an air-conditioner.
# -----------------------------------------------------------------------------
# Additional Notes:
# - The dataset has 2,075,259 rows and 9 columns.
# - We will only be using data from the dates 2007-02-01 and 2007-02-02.
# - missing values are coded as ?
# Read Input File (';'-separated; '?' marks missing values and becomes NA)
library(data.table)
inFile <- "household_power_consumption.txt"
DT<-fread(inFile, sep=";", na.strings = "?")
# add a POSIXct timestamp column built from the Date and Time text fields
DT$DATETIME<-as.POSIXct(paste(DT$Date, DT$Time), format="%d/%m/%Y %H:%M:%S")
# Convert Date column to Date type, then keep only 2007-02-01 and 2007-02-02
DT$Date<-as.Date(DT$Date, "%d/%m/%Y")
DT<-DT[DT$Date %in% c(as.Date("2007-02-01", format = "%Y-%m-%d"),as.Date("2007-02-02", format = "%Y-%m-%d")),]
# plot: empty frame first, then the line; written to plot2.png (480x480 device default)
png("plot2.png")
plot(DT$DATETIME,DT$Global_active_power, type="n", ylab = "Global Active Power (kilowatts)",xlab ="")
lines(DT$DATETIME,DT$Global_active_power)
dev.off()
|
# Appears to be an auto-generated fuzzing/valgrind regression input: a
# pathological argument list (denormalised doubles, zero-length vectors,
# huge integer counts, a 1x1 Prob matrix) fed verbatim to the compiled
# DLMtool::LBSPRgen routine to check that it does not crash or leak.
# NOTE(review): values are intentionally nonsensical -- do not "clean" them.
testlist <- list(Beta = 0, CVLinf = -1.37669503797767e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615827845-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 487 | r | testlist <- list(Beta = 0, CVLinf = -1.37669503797767e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
# Table of known Rtools releases.  Each entry maps an Rtools version string
# to:
#   version_min / version_max -- the range of R versions that release of
#     Rtools is used with; "99.99.99" marks an open-ended upper bound.
#     (Adjacent ranges intentionally overlap where two Rtools releases both
#     supported the same R versions.)
#   path -- directories, relative to the Rtools installation root, that
#     must be put on the PATH so the toolchain's binaries are found.
version_info <- list(
  "2.11" = list(
    version_min = "2.10.0",
    version_max = "2.11.1",
    path = c("bin", "perl/bin", "MinGW/bin")
  ),
  "2.12" = list(
    version_min = "2.12.0",
    version_max = "2.12.2",
    path = c("bin", "perl/bin", "MinGW/bin", "MinGW64/bin")
  ),
  "2.13" = list(
    version_min = "2.13.0",
    version_max = "2.13.2",
    path = c("bin", "MinGW/bin", "MinGW64/bin")
  ),
  "2.14" = list(
    version_min = "2.13.0",
    version_max = "2.14.2",
    path = c("bin", "MinGW/bin", "MinGW64/bin")
  ),
  "2.15" = list(
    version_min = "2.14.2",
    version_max = "2.15.1",
    path = c("bin", "gcc-4.6.3/bin")
  ),
  "2.16" = list(
    version_min = "2.15.2",
    version_max = "3.0.0",
    path = c("bin", "gcc-4.6.3/bin")
  ),
  "3.0" = list(
    version_min = "2.15.2",
    version_max = "3.0.99",
    path = c("bin", "gcc-4.6.3/bin")
  ),
  "3.1" = list(
    version_min = "3.0.0",
    version_max = "3.1.99",
    path = c("bin", "gcc-4.6.3/bin")
  ),
  "3.2" = list(
    version_min = "3.1.0",
    version_max = "3.2.99",
    path = c("bin", "gcc-4.6.3/bin")
  ),
  # Rtools 3.3: when using_gcc49() returns TRUE (presumably the optional
  # gcc 4.9 toolchain is active -- TODO confirm in using_gcc49()'s source),
  # only "bin" goes on the PATH; otherwise the bundled gcc 4.6.3 is added.
  "3.3" = list(
    version_min = "3.2.0",
    version_max = "3.3.99",
    path = if (using_gcc49()) {
      "bin"
    } else {
      c("bin", "gcc-4.6.3/bin")
    }
  ),
  "3.4" = list(
    version_min = "3.3.0",
    version_max = "3.6.3",
    path = "bin"
  ),
  "3.5" = list(
    version_min = "3.3.0",
    version_max = "3.6.3",
    path = "bin"
  ),
  # From Rtools 4.0 the layout changes to usr/bin (and ucrt64/bin for 4.0-4.1).
  "4.0" = list(
    version_min = "4.0.0",
    version_max = "4.1.99",
    path = c("usr/bin", "ucrt64/bin")
  ),
  "4.2" = list(
    version_min = "4.2.0",
    version_max = "4.2.99",
    path = "usr/bin"
  ),
  "4.3" = list(
    version_min = "4.3.0",
    version_max = "99.99.99",
    path = "usr/bin"
  ),
  # Catch-all entry for a user-supplied toolchain; the PATH layout is chosen
  # from the running R version.
  "custom" = list(
    version_min = "2.10.0",
    version_max = "99.99.99",
    path = if (getRversion() >= "4.0.0") "usr/bin" else "bin"
  )
)
| /R/rtools-metadata.R | permissive | r-lib/pkgbuild | R | false | false | 1,914 | r | version_info <- list(
"2.11" = list(
version_min = "2.10.0",
version_max = "2.11.1",
path = c("bin", "perl/bin", "MinGW/bin")
),
"2.12" = list(
version_min = "2.12.0",
version_max = "2.12.2",
path = c("bin", "perl/bin", "MinGW/bin", "MinGW64/bin")
),
"2.13" = list(
version_min = "2.13.0",
version_max = "2.13.2",
path = c("bin", "MinGW/bin", "MinGW64/bin")
),
"2.14" = list(
version_min = "2.13.0",
version_max = "2.14.2",
path = c("bin", "MinGW/bin", "MinGW64/bin")
),
"2.15" = list(
version_min = "2.14.2",
version_max = "2.15.1",
path = c("bin", "gcc-4.6.3/bin")
),
"2.16" = list(
version_min = "2.15.2",
version_max = "3.0.0",
path = c("bin", "gcc-4.6.3/bin")
),
"3.0" = list(
version_min = "2.15.2",
version_max = "3.0.99",
path = c("bin", "gcc-4.6.3/bin")
),
"3.1" = list(
version_min = "3.0.0",
version_max = "3.1.99",
path = c("bin", "gcc-4.6.3/bin")
),
"3.2" = list(
version_min = "3.1.0",
version_max = "3.2.99",
path = c("bin", "gcc-4.6.3/bin")
),
"3.3" = list(
version_min = "3.2.0",
version_max = "3.3.99",
path = if (using_gcc49()) {
"bin"
} else {
c("bin", "gcc-4.6.3/bin")
}
),
"3.4" = list(
version_min = "3.3.0",
version_max = "3.6.3",
path = "bin"
),
"3.5" = list(
version_min = "3.3.0",
version_max = "3.6.3",
path = "bin"
),
"4.0" = list(
version_min = "4.0.0",
version_max = "4.1.99",
path = c("usr/bin", "ucrt64/bin")
),
"4.2" = list(
version_min = "4.2.0",
version_max = "4.2.99",
path = "usr/bin"
),
"4.3" = list(
version_min = "4.3.0",
version_max = "99.99.99",
path = "usr/bin"
),
"custom" = list(
version_min = "2.10.0",
version_max = "99.99.99",
path = if (getRversion() >= "4.0.0") "usr/bin" else "bin"
)
)
|
# Pool-level mosquito infection rates (MIR) for the three "small" counties:
# Lake, McHenry, and Will.
library(binGroup)

mirlw <- read.csv("RandomMIRSamplingC.csv")

# Keep only the three target counties.
# NOTE(review): levels(...)[3:5] relies on the alphabetical ordering of the
# County factor -- confirm these are Lake/McHenry/Will if the input changes.
smcty <- levels(mirlw$County)[3:5]
mirlw <- mirlw[mirlw$County %in% smcty, ]
mirlw$County <- droplevels(mirlw$County)

# Per-row MIR point estimate and confidence bounds (scaled per 1000 tested),
# computed with binGroup::pooledBin.  Vectors are preallocated, and
# seq_len() (rather than 1:nrow) is safe if the filtered frame has 0 rows.
MIR0 <- MIR0L <- MIR0U <- rep(NA_real_, nrow(mirlw))
for (i in seq_len(nrow(mirlw))) {
  pb <- pooledBin(mirlw$Npos0[i], mirlw$Pop0[i], pt.method = "mir", scale = 1000)
  MIR0[i]  <- pb$p    # point estimate
  MIR0L[i] <- pb$lcl  # lower confidence limit
  MIR0U[i] <- pb$ucl  # upper confidence limit
}
mirlw$MIR0  <- MIR0
mirlw$MIR0U <- MIR0U
mirlw$MIR0L <- MIR0L
save(mirlw,file="SmallCountyObservations.Rdata") | /small_county.R | no_license | krishnavemuri/MIR_VOI | R | false | false | 493 | r | #Lake, McHenry, and Will
mirlw=read.csv("RandomMIRSamplingC.csv")
library(binGroup)
smcty=levels(mirlw$County)[3:5]
mirlw=mirlw[which(mirlw$County%in%smcty),]
mirlw$County=droplevels(mirlw$County)
MIR0=MIR0L=MIR0U=rep(NA,nrow(mirlw))
for(i in 1:nrow(mirlw)){
pb = pooledBin(mirlw$Npos0[i],mirlw$Pop0[i],pt.method = "mir",scale = 1000)
MIR0[i] = pb$p
MIR0L[i] = pb$lcl
MIR0U[i] = pb$ucl
}
mirlw$MIR0=MIR0
mirlw$MIR0U=MIR0U
mirlw$MIR0L=MIR0L
save(mirlw,file="SmallCountyObservations.Rdata") |
#' Plot time series of glucose colored by rate of change
#'
#' @description
#' The function plot_roc produces a time series plot of glucose values colored
#' by categorized rate of change values
#'
#' @usage
#' plot_roc(data, subjects = NULL, timelag = 15, dt0 = NULL, inter_gap = 45, tz = "")
#'
#' @inheritParams roc
#'
#' @param subjects String or list of strings corresponding to subject names
#' in 'id' column of data. Default is all subjects.
#'
#' @return A time series of glucose values colored by ROC categories per subject
#'
#' @export
#'
#' @details
#' For the default, a time series is produced for each subject in which the glucose values are
#' plotted and colored by ROC categories defined as follows. The breaks for the categories are:
#' c(-Inf, -3, -2, -1, 1, 2, 3, Inf) where the glucose is in mg/dl and the ROC values are in mg/dl/min.
#' A ROC of -5 mg/dl/min will thus be placed in the first category and colored accordingly. The breaks
#' for the categories come from the reference paper below.
#'
#' @author Elizabeth Chun, David Buchanan
#'
#' @references
#' Klonoff, D. C., & Kerr, D. (2017) A Simplified Approach Using Rate of Change Arrows to
#' Adjust Insulin With Real-Time Continuous Glucose Monitoring.
#' \emph{Journal of Diabetes Science and Technology} \strong{11(6)} 1063-1069,
#' \doi{10.1177/1932296817723260}.
#'
#' @examples
#'
#' data(example_data_1_subject)
#' plot_roc(example_data_1_subject)
#'
#' data(example_data_5_subject)
#' plot_roc(example_data_5_subject, subjects = 'Subject 5')
#'
plot_roc <- function(data, subjects = NULL, timelag = 15, dt0 = NULL, inter_gap = 45, tz = ""){
  # Helper: reconstruct the uniform timestamp grid that CGMS2DayByDay()
  # interpolates onto, so each interpolated glucose value can be paired
  # with its wall-clock time.
  time_single <- function(data) {
    data_ip = CGMS2DayByDay(data, dt0 = dt0, inter_gap = inter_gap, tz = tz)
    # data_ip[[3]] is the grid spacing (minutes) actually chosen;
    # data_ip[[2]] is the vector of dates covered.
    dt0 = data_ip[[3]]
    day_one = lubridate::as_datetime(data_ip[[2]][1])
    ndays = length(data_ip[[2]])
    # One timestamp every dt0 minutes for each of the ndays days.
    dti = rep(dt0, ndays * 24 * 60 /dt0)
    time_out = day_one + lubridate::minutes(cumsum(dti))
    return(time_out)
  }
  # Dummy NULL bindings silence R CMD check NOTEs about the NSE column
  # names used below; rm() removes them again so they cannot shadow
  # the real columns.
  gl = gl_ip = time_ip = id = roc = category = NULL
  rm(list = c("gl", "id", "roc", "category", "gl_ip", "time_ip"))
  data = check_data_columns(data)
  # Optionally restrict to the requested subject id(s).
  if (!is.null(subjects)) {
    data = data[data$id %in% subjects, ]
  }
  # Per subject: interpolated timestamps, interpolated glucose (row-major,
  # hence t() before flattening), the rate of change, and its clinical
  # category (breaks in mg/dl/min, per Klonoff & Kerr 2017).
  # NOTE(review): summarise() returning multiple rows per group is
  # deprecated in dplyr >= 1.1 (reframe() is the replacement) -- confirm
  # the package's dplyr version floor before changing.
  data = data %>%
    dplyr::group_by(id) %>%
    dplyr::summarise(
      time_ip = time_single(data.frame(id, time, gl)),
      gl_ip = as.vector(t(CGMS2DayByDay(
        data.frame(id, time, gl), dt0 = dt0, inter_gap = inter_gap, tz = tz)[[1]])),
      roc = roc(data.frame(id, time, gl), timelag, dt0, inter_gap, tz)$roc,
      category = cut(
        roc, breaks = c(-Inf, -3, -2, -1, 1, 2, 3, Inf),
        labels = c("-Inf to -3", "-3 to -2", "-2 to -1",
                   "-1 to 1", "1 to 2", "2 to 3", "3 to Inf"))
    )
  # Blue shades = falling glucose, white = stable, red shades = rising.
  colours = c("-Inf to -3" = "#0025FA", "-3 to -2" = "#197DE3",
              "-2 to -1" = "#B3FFF8", "-1 to 1" = "white",
              "1 to 2" = "#FEC7B6", "2 to 3" = "#FB5454",
              "3 to Inf" = "#9F0909")
  # Drop grid points with no interpolated glucose, then draw one facet per
  # subject; ROC values that are NA (start of series) plot as gray.
  ggplot2::ggplot(data = data[complete.cases(data$gl_ip), ],
                  ggplot2::aes(x = time_ip, y = gl_ip, color = category)) +
    ggplot2::geom_point() +
    ggplot2::scale_x_datetime(name = 'Date') +
    ggplot2::scale_y_continuous(name = 'Blood Glucose') +
    ggplot2::facet_wrap(~id, scales = "free_x") +
    ggplot2::scale_color_manual(values = colours, na.value = "gray",
                                name = "Category (mg/dl/min)")
}
| /R/plot_roc.R | no_license | stevebroll/iglu | R | false | false | 3,411 | r | #' Plot time series of glucose colored by rate of change
#'
#' @description
#' The function plot_roc produces a time series plot of glucose values colored
#' by categorized rate of change values
#'
#' @usage
#' plot_roc(data, subjects = NULL, timelag = 15, dt0 = NULL, inter_gap = 45, tz = "")
#'
#' @inheritParams roc
#'
#' @param subjects String or list of strings corresponding to subject names
#' in 'id' column of data. Default is all subjects.
#'
#' @return A time series of glucose values colored by ROC categories per subject
#'
#' @export
#'
#' @details
#' For the default, a time series is produced for each subject in which the glucose values are
#' plotted and colored by ROC categories defined as follows. The breaks for the categories are:
#' c(-Inf, -3, -2, -1, 1, 2, 3, Inf) where the glucose is in mg/dl and the ROC values are in mg/dl/min.
#' A ROC of -5 mg/dl/min will thus be placed in the first category and colored accordingly. The breaks
#' for the categories come from the reference paper below.
#'
#' @author Elizabeth Chun, David Buchanan
#'
#' @references
#' Klonoff, D. C., & Kerr, D. (2017) A Simplified Approach Using Rate of Change Arrows to
#' Adjust Insulin With Real-Time Continuous Glucose Monitoring.
#' \emph{Journal of Diabetes Science and Technology} \strong{11(6)} 1063-1069,
#' \doi{10.1177/1932296817723260}.
#'
#' @examples
#'
#' data(example_data_1_subject)
#' plot_roc(example_data_1_subject)
#'
#' data(example_data_5_subject)
#' plot_roc(example_data_5_subject, subjects = 'Subject 5')
#'
plot_roc <- function(data, subjects = NULL, timelag = 15, dt0 = NULL, inter_gap = 45, tz = ""){
time_single <- function(data) {
data_ip = CGMS2DayByDay(data, dt0 = dt0, inter_gap = inter_gap, tz = tz)
dt0 = data_ip[[3]]
day_one = lubridate::as_datetime(data_ip[[2]][1])
ndays = length(data_ip[[2]])
dti = rep(dt0, ndays * 24 * 60 /dt0)
time_out = day_one + lubridate::minutes(cumsum(dti))
return(time_out)
}
gl = gl_ip = time_ip = id = roc = category = NULL
rm(list = c("gl", "id", "roc", "category", "gl_ip", "time_ip"))
data = check_data_columns(data)
if (!is.null(subjects)) {
data = data[data$id %in% subjects, ]
}
data = data %>%
dplyr::group_by(id) %>%
dplyr::summarise(
time_ip = time_single(data.frame(id, time, gl)),
gl_ip = as.vector(t(CGMS2DayByDay(
data.frame(id, time, gl), dt0 = dt0, inter_gap = inter_gap, tz = tz)[[1]])),
roc = roc(data.frame(id, time, gl), timelag, dt0, inter_gap, tz)$roc,
category = cut(
roc, breaks = c(-Inf, -3, -2, -1, 1, 2, 3, Inf),
labels = c("-Inf to -3", "-3 to -2", "-2 to -1",
"-1 to 1", "1 to 2", "2 to 3", "3 to Inf"))
)
colours = c("-Inf to -3" = "#0025FA", "-3 to -2" = "#197DE3",
"-2 to -1" = "#B3FFF8", "-1 to 1" = "white",
"1 to 2" = "#FEC7B6", "2 to 3" = "#FB5454",
"3 to Inf" = "#9F0909")
ggplot2::ggplot(data = data[complete.cases(data$gl_ip), ],
ggplot2::aes(x = time_ip, y = gl_ip, color = category)) +
ggplot2::geom_point() +
ggplot2::scale_x_datetime(name = 'Date') +
ggplot2::scale_y_continuous(name = 'Blood Glucose') +
ggplot2::facet_wrap(~id, scales = "free_x") +
ggplot2::scale_color_manual(values = colours, na.value = "gray",
name = "Category (mg/dl/min)")
}
|
# Draw an x/y line chart with ggplot2 and write it to "<imageName>.png".
# Every setting can be injected by defining the variable before sourcing
# this script; anything left undefined falls back to a default.
if (!exists("PNGwidth"))  PNGwidth  <- 480
if (!exists("PNGheight")) PNGheight <- 480
if (!exists("PNGxlabel")) PNGxlabel <- ""
if (!exists("PNGylabel")) PNGylabel <- ""
if (!exists("x")) x <- 1:100
if (!exists("y")) y <- rnorm(100)
if (!exists("imageName")) imageName <- "xyplot"

df <- data.frame(x, y)

library(ggplot2)
png(paste0(imageName, ".png"), width = PNGwidth, height = PNGheight)
g <- ggplot(data = df, aes(x = x, y = y)) +
  geom_line() +
  xlab(PNGxlabel) +
  ylab(PNGylabel)
# An optional caller-supplied scale (e.g. scale_y_log10()) is appended last.
if (exists("y_scale")) {
  g <- g + y_scale
}
print(g)
dev.off()
| /RCode/xyPlot.R | no_license | RodrigoNieves/Tesis | R | false | false | 593 | r | if(!exists("PNGwidth")){
PNGwidth <- 480
}
if(!exists("PNGheight")){
PNGheight <- 480
}
if(!exists("PNGxlabel")){
PNGxlabel <- ""
}
if(!exists("PNGylabel")){
PNGylabel <- ""
}
if(!exists("x")){
x <- 1:100
}
if(!exists("y")){
y <- rnorm(100)
}
if(!exists("imageName")){
imageName <- "xyplot"
}
df <- data.frame(x,y)
library(ggplot2)
png(paste0(imageName,".png"),width = PNGwidth, height = PNGheight)
g <- ggplot(data= df, aes(x = x,y = y))
g <- g + geom_line()
g <- g + xlab(PNGxlabel)
g <- g + ylab(PNGylabel)
if(exists("y_scale"))
{
g <- g + y_scale
}
print(g)
dev.off()
|
# Classification Tree on Wheat Data
##########
# Enter data and do some processing.
# NOTE(review): hard-coded absolute Windows path -- adjust for your machine.
wheat <- read.csv("C:\\Users\\Tom loughin\\sfuvault\\452 Statistical Learning\\R\\wheat.csv")
head(wheat)
#summary(wheat)
# Variable "type" is the response variable. "class" is another explanatory.
class(wheat$type)
# Consistent `<-` assignment throughout (the originals mixed `=` and `<-`).
wheat$type <- as.factor(wheat$type)
wheat$class <- as.factor(wheat$class)
#summary(wheat)
# Create a numerical version of "class" for methods that need numbers
###Not needed for trees
#wheat$classnum <- as.numeric((wheat$class))
# Remove "id" (the first column), which carries no predictive information.
wheat <- wheat[, -1]
#summary(wheat)
############
# Creating TWO sets: 200 train, 75 test.  The fixed seed makes the split
# reproducible; perm is a random permutation of the row indices.
set.seed(67982193)
perm <- sample(x = nrow(wheat))
set1 <- wheat[which(perm <= 200), ]
set2 <- wheat[which(perm > 200), ]
library(rpart)
####################################################################
## Default tree
## Specifying method="class" for classification
## Split criterion is Gini
## Deviance is available through parms=list(split="information")
####################################################################
# cp=0 grows the full (unpruned) tree so pruning can be tuned afterwards.
wh.tree <- rpart(data=set1, type ~ ., method="class", cp=0)
printcp(wh.tree)
round(wh.tree$cptable[,c(2:5,1)],4)
# summary(wh.tree) #Lots of output
# See pdf of this---Note that it IS making splits that improve
# probabilities but do not change classes
library(rpart.plot)
# x11() opens a new on-screen graphics window (platform-dependent; use
# windows()/quartz() where x11 is unavailable).
x11(h=10, w=10)
prp(wh.tree, type=1, extra=1, main="Original full tree")
# Plot of the cross-validation for the complexity parameter.
## NOTE: Can be very variable, depending on CV partitioning
x11(h=7, w=10, pointsize=10)
plotcp(wh.tree)
# cptable columns are CP, nsplit, rel error, xerror, xstd -- so column 4
# is the cross-validated error and column 5 its standard error.
# Find location of minimum error
cpt = wh.tree$cptable
minrow <- which.min(cpt[,4])
# Take geometric mean of cp values at min error and one step up, so the
# pruning threshold lies strictly between two tabled CP values.
cplow.min <- cpt[minrow,1]
cpup.min <- ifelse(minrow==1, yes=1, no=cpt[minrow-1,1])
cp.min <- sqrt(cplow.min*cpup.min)
# Find smallest row where error is below +1SE (the "1-SE rule": smallest
# tree whose CV error is within one SE of the minimum).
se.row <- min(which(cpt[,4] < cpt[minrow,4]+cpt[minrow,5]))
# Take geometric mean of cp values at that row and one step up.
cplow.1se <- cpt[se.row,1]
cpup.1se <- ifelse(se.row==1, yes=1, no=cpt[se.row-1,1])
cp.1se <- sqrt(cplow.1se*cpup.1se)
# Pruned tree using the 1-SE CP selected by CV.
wh.prune.cv.1se <- prune(wh.tree, cp=cp.1se)
# Pruned tree using the minimum-CV-error CP.
wh.prune.cv.min <- prune(wh.tree, cp=cp.min)
# Plot the pruned trees side by side.
x11(h=12, w=18)
par(mfrow=c(1,2))
prp(wh.prune.cv.1se, type=1, extra=1, main="Pruned CV-1SE tree")
prp(wh.prune.cv.min, type=1, extra=1, main="Pruned CV-min tree")
# Predict classes on the training set for each tree.
# (type="class" returns the predicted class label as a factor.)
pred.train.cv.1se <- predict(wh.prune.cv.1se, newdata=set1, type="class")
pred.train.cv.min <- predict(wh.prune.cv.min, newdata=set1, type="class")
pred.train.full <- predict(wh.tree, newdata=set1, type="class")
# Predict classes on the held-out test set.
pred.test.cv.1se <- predict(wh.prune.cv.1se, newdata=set2, type="class")
pred.test.cv.min <- predict(wh.prune.cv.min, newdata=set2, type="class")
pred.test.full <- predict(wh.tree, newdata=set2, type="class")
# Misclassification rates.  mean() of a logical vector is the proportion
# TRUE, so mean(pred != truth) replaces the ifelse(==, yes=0, no=1)
# construction with the same value.  Outer parentheses print while assigning.
(misclass.train.cv.1se <- mean(pred.train.cv.1se != set1$type))
(misclass.train.cv.min <- mean(pred.train.cv.min != set1$type))
(misclass.train.full <- mean(pred.train.full != set1$type))
(misclass.test.cv.1se <- mean(pred.test.cv.1se != set2$type))
(misclass.test.cv.min <- mean(pred.test.cv.min != set2$type))
(misclass.test.full <- mean(pred.test.full != set2$type))
# Confusion matrix for the full tree on the test set.
table(set2$type, pred.test.full, dnn=c("Observed","Predicted"))
| /STAT452_Statistical_Learning/452Lecture_Rcode/L20_Tree_Boosting_RandomForest/L20 - Classification Tree Wheat.R | no_license | yiyangd/SFU | R | false | false | 3,817 | r | # Classification Tree on Wheat Data
##########
# Enter data and do some processing
wheat <- read.csv("C:\\Users\\Tom loughin\\sfuvault\\452 Statistical Learning\\R\\wheat.csv")
head(wheat)
#summary(wheat)
# Variable "type" is the response variable. "class" is another explanatory.
class(wheat$type)
wheat$type = as.factor(wheat$type)
wheat$class = as.factor(wheat$class)
#summary(wheat)
# Create a numerical version of "class" for methods that need numbers
###Not needed for trees
#wheat$classnum <- as.numeric((wheat$class))
# Remove "id"
wheat = wheat[,-1]
#summary(wheat)
############
# Creating TWO sets: 200 train, 75 test
set.seed(67982193)
perm <- sample(x=nrow(wheat))
set1 <- wheat[which(perm <= 200),]
set2 <- wheat[which(perm>200),]
library(rpart)
####################################################################
## Default tree
## Specifying method="class" for classification
## Split criterion is Gini
## Deviance is available through parms=list(split="information")
####################################################################
wh.tree <- rpart(data=set1, type ~ ., method="class", cp=0)
printcp(wh.tree)
round(wh.tree$cptable[,c(2:5,1)],4)
# summary(wh.tree) #Lots of output
# See pdf of this---Note that it IS making splits that improve
# probabilities but do not change classes
library(rpart.plot)
x11(h=10, w=10)
prp(wh.tree, type=1, extra=1, main="Original full tree")
# Plot of the cross-validation for the complexity parameter.
## NOTE: Can be very variable, depending on CV partitioning
x11(h=7, w=10, pointsize=10)
plotcp(wh.tree)
# Find location of minimum error
cpt = wh.tree$cptable
minrow <- which.min(cpt[,4])
# Take geometric mean of cp values at min error and one step up
cplow.min <- cpt[minrow,1]
cpup.min <- ifelse(minrow==1, yes=1, no=cpt[minrow-1,1])
cp.min <- sqrt(cplow.min*cpup.min)
# Find smallest row where error is below +1SE
se.row <- min(which(cpt[,4] < cpt[minrow,4]+cpt[minrow,5]))
# Take geometric mean of cp values at min error and one step up
cplow.1se <- cpt[se.row,1]
cpup.1se <- ifelse(se.row==1, yes=1, no=cpt[se.row-1,1])
cp.1se <- sqrt(cplow.1se*cpup.1se)
# Creating a pruned tree using a selected value of the CP by CV.
wh.prune.cv.1se <- prune(wh.tree, cp=cp.1se)
# Creating a pruned tree using a selected value of the CP by CV.
wh.prune.cv.min <- prune(wh.tree, cp=cp.min)
# Plot the pruned trees
x11(h=12, w=18)
par(mfrow=c(1,2))
prp(wh.prune.cv.1se, type=1, extra=1, main="Pruned CV-1SE tree")
prp(wh.prune.cv.min, type=1, extra=1, main="Pruned CV-min tree")
# Predict results of classification. "Vector" means store class as a number
pred.train.cv.1se <- predict(wh.prune.cv.1se, newdata=set1, type="class")
pred.train.cv.min <- predict(wh.prune.cv.min, newdata=set1, type="class")
pred.train.full <- predict(wh.tree, newdata=set1, type="class")
# Predict results of classification. "Vector" means store class as a number
pred.test.cv.1se <- predict(wh.prune.cv.1se, newdata=set2, type="class")
pred.test.cv.min <- predict(wh.prune.cv.min, newdata=set2, type="class")
pred.test.full <- predict(wh.tree, newdata=set2, type="class")
(misclass.train.cv.1se <- mean(ifelse(pred.train.cv.1se == set1$type, yes=0, no=1)))
(misclass.train.cv.min <- mean(ifelse(pred.train.cv.min == set1$type, yes=0, no=1)))
(misclass.train.full <- mean(ifelse(pred.train.full == set1$type, yes=0, no=1)))
(misclass.test.cv.1se <- mean(ifelse(pred.test.cv.1se == set2$type, yes=0, no=1)))
(misclass.test.cv.min <- mean(ifelse(pred.test.cv.min == set2$type, yes=0, no=1)))
(misclass.test.full <- mean(ifelse(pred.test.full == set2$type, yes=0, no=1)))
# Confusion Matrices
table(set2$type, pred.test.full, dnn=c("Observed","Predicted"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Recommenders.R
\name{MOA_recommender}
\alias{MOA_recommender}
\title{Create a MOA recommendation engine}
\usage{
MOA_recommender(model, control = NULL, ...)
}
\arguments{
\item{model}{a character string specifying the model,
e.g. \code{BRISMFPredictor} or \code{BaselinePredictor}.
The list of known models can be obtained by typing \code{RMOA:::.moaknownmodels}.
See the examples and \code{\link{MOAoptions}}.}
\item{control}{an object of class \code{MOAmodelOptions} as obtained by calling \code{\link{MOAoptions}}}
\item{...}{further options passed on to \code{\link{MOAoptions}} in case \code{control} is left as \code{NULL}.
Ignored if \code{control} is supplied.}
}
\value{
An object of class \code{MOA_recommender}
}
\description{
Create a MOA recommendation engine
}
\examples{
RMOA:::.moaknownmodels
ctrl <- MOAoptions(model = "BRISMFPredictor", features = 10, lRate=0.002)
brism <- MOA_recommender(model = "BRISMFPredictor", control=ctrl)
brism
MOAoptions(model = "BaselinePredictor")
baseline <- MOA_recommender(model = "BaselinePredictor")
baseline
}
\seealso{
\code{\link{MOAoptions}}
}
| /RMOA/pkg/man/MOA_recommender.Rd | no_license | jwijffels/RMOA | R | false | true | 1,147 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Recommenders.R
\name{MOA_recommender}
\alias{MOA_recommender}
\title{Create a MOA recommendation engine}
\usage{
MOA_recommender(model, control = NULL, ...)
}
\arguments{
\item{model}{character string with a model.
E.g. BRISMFPredictor, BaselinePredictor
The list of known models can be obtained by typing RMOA:::.moaknownmodels.
See the examples and \code{\link{MOAoptions}}.}
\item{control}{an object of class \code{MOAmodelOptions} as obtained by calling \code{\link{MOAoptions}}}
\item{...}{options of parameters passed on to \code{\link{MOAoptions}}, in case \code{control} is left to NULL.
Ignored if \code{control} is supplied}
}
\value{
An object of class \code{MOA_recommender}
}
\description{
Create a MOA recommendation engine
}
\examples{
RMOA:::.moaknownmodels
ctrl <- MOAoptions(model = "BRISMFPredictor", features = 10, lRate=0.002)
brism <- MOA_recommender(model = "BRISMFPredictor", control=ctrl)
brism
MOAoptions(model = "BaselinePredictor")
baseline <- MOA_recommender(model = "BaselinePredictor")
baseline
}
\seealso{
\code{\link{MOAoptions}}
}
|
#' Plot the Precipitation Concentration Index (PCI)
#'
#' Plots the annual PCI series of one or more precintcon datasets.  Daily
#' and monthly series are first converted to PCI via
#' \code{precintcon.pci.analysis}.
#'
#' @param ... one or more datasets of class \code{precintcon.pci},
#'   \code{precintcon.monthly}, or \code{precintcon.daily}; other objects
#'   are silently skipped.
#' @param xlab the x-axis label.
#' @param ylab the y-axis label.
#' @param legend optional vector of display names, one per input dataset.
#' @param fontsize the base font size used in the plot.
#' @param axis.text.color the color of the axis text.
#' @param export if \code{TRUE}, plots are saved to files instead of being
#'   printed to the active graphics device.
#' @param export.name base file name used when exporting; the dataset name
#'   is prepended, e.g. "mydata_pci_plot.png".
#' @param width the exported image width.
#' @param height the exported image height.
#' @param units the unit of \code{width} and \code{height} (e.g. "cm").
#' @return Called for its plotting side effects; the return value is not
#'   meaningful.
#' @export
precintcon.plot.pci <- function(
  ...,
  xlab = "Years",
  ylab = "PCI",
  legend = NULL,
  fontsize = 10,
  axis.text.color = "black",
  export = FALSE,
  export.name = "pci_plot.png",
  width = 10,
  height = 10,
  units = "cm"
) {
  l <- list(...)

  # The arguments in `...` as written by the caller (match.call positions
  # 2..n+1); these expressions are used as dataset labels unless an
  # explicit legend is supplied.
  varl <- as.list(match.call()[1:length(l) + 1])

  # When several datasets are plotted interactively, ask before advancing
  # to the next plot.  (TRUE/FALSE instead of the unsafe T/F aliases.)
  if (length(l) > 1 && !export)
    par(ask = TRUE)

  if (!is.null(legend) && length(varl) != length(legend)) {
    stop(paste("legend should have length equal to the number of input data. legend parameter length",
               length(legend), ": number of input data", length(varl)))
  } else if (!is.null(legend))
    varl <- as.vector(legend)

  mapply(function(d, n, axis.text.color, fontsize,
                  xlab, ylab, export, export.name, width, height, units) {
    if (inherits(d, "precintcon.pci") ||
        inherits(d, "precintcon.monthly") ||
        inherits(d, "precintcon.daily")) {
      # Normalize the input: daily -> monthly -> PCI.
      if (inherits(d, "precintcon.daily")) {
        d <- as.precintcon.monthly(d)
      }
      if (inherits(d, "precintcon.monthly")) {
        d <- precintcon.pci.analysis(d)
      }
      # Tag the data with its display name so facet_grid() shows it.
      d <- cbind(d, data.frame(dataset = paste(n)))
      # NOTE(review): aes_string() is deprecated in recent ggplot2; kept
      # for compatibility with the package's ggplot2 version floor.
      graph <- ggplot(d, aes_string("year", "pci")) + geom_line(size = .5) +
        geom_point(size = 2) + xlab(xlab) + ylab(ylab) +
        theme(text = element_text(size = fontsize),
              axis.text = element_text(color = axis.text.color),
              axis.text.x = element_text(angle = 25),
              axis.title.x = element_text(vjust = .1)) +
        scale_x_continuous(expand = c(.02, .02),
                           breaks = seq(min(d$year), max(d$year), by = 2)) +
        facet_grid(. ~ dataset)
      if (!export) {
        print(graph)
      } else {
        # Prepend the dataset name so exports from multiple inputs
        # do not overwrite each other.
        export.name <- paste(n, export.name, sep = "_")
        ggsave(export.name, plot = graph, height = height, width = width, units = units)
      }
    }
  }, l, varl,
  axis.text.color = axis.text.color, fontsize = fontsize, width = width,
  height = height, units = units,
  MoreArgs = list(xlab = xlab, ylab = ylab,
                  export = export, export.name = export.name),
  SIMPLIFY = FALSE)

  # Restore non-interactive paging.
  par(ask = FALSE)
}
| /R/precintcon.plot.pci.r | no_license | lucasvenez/precintcon | R | false | false | 2,180 | r | #' @export
precintcon.plot.pci <- function(
...,
xlab = "Years",
ylab = "PCI",
legend = NULL,
fontsize = 10,
axis.text.color = "black",
export = FALSE,
export.name = "pci_plot.png",
width = 10,
height = 10,
units = "cm"
) {
l <- list(...)
varl <- as.list(match.call()[1:length(l)+1])
if (length(l) > 1 && !export)
par(ask=T)
if (!is.null(legend) && length(varl) != length(legend)) {
stop(paste("legend should has length equals to the number of input data. legend parameter length",
length(legend), ": number of input data", length(varl)))
} else if (!is.null(legend))
varl <- as.vector(legend)
mapply(function(d, n, axis.text.color, fontsize,
xlab, ylab, export, export.name, width, height, units) {
if (is.element("precintcon.pci", class(d)) ||
is.element("precintcon.monthly", class(d)) ||
is.element("precintcon.daily", class(d))) {
if (is.element("precintcon.daily", class(d))) {
d <- as.precintcon.monthly(d)
}
if (is.element("precintcon.monthly", class(d))) {
d <- precintcon.pci.analysis(d)
}
d <- cbind(d, data.frame(dataset=paste(n)))
graph <- ggplot(d, aes_string("year", "pci")) + geom_line(size=.5) +
geom_point(size=2) + xlab(xlab) + ylab(ylab) +
theme(text = element_text(size = fontsize),
axis.text = element_text(color = axis.text.color),
axis.text.x = element_text(angle = 25),
axis.title.x = element_text(vjust = .1)) +
scale_x_continuous(expand = c(.02, .02),
breaks = seq(min(d$year), max(d$year), by = 2)) +
facet_grid(. ~ dataset)
if (!export) {
print(graph)
} else {
export.name <- paste(n, export.name, sep="_")
ggsave(export.name, plot=graph, height=height, width=width, units=units)
}
}
}, l, varl,
axis.text.color = axis.text.color, fontsize = fontsize, width = width, height = height, units = units, MoreArgs = list(xlab = xlab, ylab = ylab,
export = export, export.name = export.name), SIMPLIFY = FALSE)
par(ask=F)
}
|
# Query generator: GA visits, Colombia.
# Builds three BigQuery SQL strings -- per-minute (`query`), per-day
# (`query_dia`), and brand-only per-minute (`query_brand`) visits -- by
# comma-separating one ga_sessions_YYYYMMDD subquery per day in the
# requested date range (in legacy BigQuery SQL the comma acts as a
# table union).
library(lubridate)

# Inputs: inclusive date range of the daily ga_sessions_* tables to query.
inicio <- as.Date("2014-08-25")
final <- as.Date("2014-08-27")
seq <- seq(from = inicio, to = final, by = 1)

# Outer SELECTs and per-day subquery fragments (p1* = head up to the table
# suffix, p2* = filters/grouping after it).
query <- "SELECT date, hits.hour,hits.minute, visits, newvisits, transactions from"
query_dia <- "SELECT date, visits, newvisits, transactions from"
query_brand <- "SELECT date as fecha, hits.hour as hora,hits.minute as minute, visits as brandvisits, newvisits, transactions from"
p1 <- "(SELECT date, hits.hour,hits.minute, sum(totals.visits) visits, sum(totals.newVisits) newvisits, sum(totals.transactions) transactions FROM [golden-passkey-615:58093646.ga_sessions_"
p1_dia <- "(SELECT date, sum(totals.visits) visits, sum(totals.newVisits) newvisits, sum(totals.transactions) transactions FROM [golden-passkey-615:58093646.ga_sessions_"
p1_brand <- "(SELECT date, hits.hour,hits.minute, sum(totals.visits) visits, sum(totals.newVisits) newvisits, sum(totals.transactions) transactions FROM [golden-passkey-615:58093646.ga_sessions_"
p2 <- "] where trafficSource.source not like 'Postal%' and trafficSource.source not like 'Hermedia' and trafficSource.source not like 'Ingenious' and hits.time = 0 group by date, hits.hour, hits.minute)"
p2_dia <- "] where trafficSource.source not like 'Postal%' and trafficSource.source not like 'Hermedia' and trafficSource.source not like 'Ingenious' and hits.time = 0 group by date)"
p2_brand <- "] where trafficSource.source not like 'Postal%' and trafficSource.source not like 'Hermedia' and trafficSource.source not like 'Ingenious' and (trafficSource.medium like '%organic%' or trafficSource.source like '%(direct)%' or trafficSource.campaign like '%brand%') and hits.time = 0 group by date, hits.hour, hits.minute)"

for (i in seq_along(seq)) {
  # Daily table suffix, e.g. "20140825".  format() yields the same
  # zero-padded string as the previous year()/sprintf() construction.
  tbl <- format(seq[i], "%Y%m%d")
  query <- paste0(query, p1, tbl, p2)
  query_dia <- paste0(query_dia, p1_dia, tbl, p2_dia)
  query_brand <- paste0(query_brand, p1_brand, tbl, p2_brand)
  # Separate daily subqueries with commas; close the last one with its
  # ORDER BY clause.  (Plain if/else replaces the earlier scalar misuse
  # of ifelse(), which is meant for vectors, not control flow.)
  if (i < length(seq)) {
    query <- paste0(query, ",")
    query_dia <- paste0(query_dia, ",")
    query_brand <- paste0(query_brand, ",")
  } else {
    query <- paste0(query, " order by date,hits.hour, hits.minute;")
    query_dia <- paste0(query_dia, " order by date;")
    query_brand <- paste0(query_brand, " order by date,hits.hour, hits.minute;")
  }
}
# Create the full table (run manually in BigQuery):
# SELECT date, hits_hour,hits_minute, visits, t1.newvisits newvisits, t1.transactions transactions, t2.brandvisits as brandvisits
# from [golden-passkey-615:export.col_30jun_24ago_act] as t1
# join [golden-passkey-615:export.col_30jun_24ago_brand] as t2
# on t1.date = t2.fecha and t1.hits_hour = t2.hora and t1.hits_minute = t2.minute
# order by date,hits_hour, hits_minute;
| /query_gen_co.R | no_license | juanzinser/TV_comercial_prediction | R | false | false | 3,068 | r | # Query generator: ga visits colombia
# Builds three BigQuery statements (per-minute, per-day, and brand-only
# per-minute visit aggregations) over the daily ga_sessions_YYYYMMDD tables.
library(lubridate)  # retained from the original; date formatting below now uses base format()
# Inputs: inclusive day range -- one BigQuery table exists per day.
inicio <- as.Date("2014-08-25")
final <- as.Date("2014-08-27")
fechas <- seq(from = inicio, to = final, by = 1)  # renamed from `seq`, which shadowed base::seq()
# Outer SELECTs over the comma-separated union of per-day subqueries.
query <- "SELECT date, hits.hour,hits.minute, visits, newvisits, transactions from"
query_dia <- "SELECT date, visits, newvisits, transactions from"
query_brand <- "SELECT date as fecha, hits.hour as hora,hits.minute as minute, visits as brandvisits, newvisits, transactions from"
# Subquery prefix (p1*) and filter/GROUP BY suffix (p2*); the YYYYMMDD table
# suffix is spliced between them for every day in `fechas`.
p1 <- "(SELECT date, hits.hour,hits.minute, sum(totals.visits) visits, sum(totals.newVisits) newvisits, sum(totals.transactions) transactions FROM [golden-passkey-615:58093646.ga_sessions_"
p1_dia <- "(SELECT date, sum(totals.visits) visits, sum(totals.newVisits) newvisits, sum(totals.transactions) transactions FROM [golden-passkey-615:58093646.ga_sessions_"
p1_brand <- "(SELECT date, hits.hour,hits.minute, sum(totals.visits) visits, sum(totals.newVisits) newvisits, sum(totals.transactions) transactions FROM [golden-passkey-615:58093646.ga_sessions_"
p2 <- "] where trafficSource.source not like 'Postal%' and trafficSource.source not like 'Hermedia' and trafficSource.source not like 'Ingenious' and hits.time = 0 group by date, hits.hour, hits.minute)"
p2_dia <- "] where trafficSource.source not like 'Postal%' and trafficSource.source not like 'Hermedia' and trafficSource.source not like 'Ingenious' and hits.time = 0 group by date)"
p2_brand <- "] where trafficSource.source not like 'Postal%' and trafficSource.source not like 'Hermedia' and trafficSource.source not like 'Ingenious' and (trafficSource.medium like '%organic%' or trafficSource.source like '%(direct)%' or trafficSource.campaign like '%brand%') and hits.time = 0 group by date, hits.hour, hits.minute)"
for (i in seq_along(fechas)) {
  # YYYYMMDD table suffix, e.g. "20140825" -- equivalent to the original
  # year()/sprintf("%02d", month())/sprintf("%02d", day()) concatenation.
  sufijo <- format(fechas[i], "%Y%m%d")
  query <- paste0(query, p1, sufijo, p2)
  query_dia <- paste0(query_dia, p1_dia, sufijo, p2_dia)
  query_brand <- paste0(query_brand, p1_brand, sufijo, p2_brand)
  if (i < length(fechas)) {
    # Comma-separate consecutive daily subqueries. Plain if/else replaces the
    # original scalar ifelse() calls, which abused a vectorized selector for
    # side-effecting control flow.
    query <- paste0(query, ",")
    query_dia <- paste0(query_dia, ",")
    query_brand <- paste0(query_brand, ",")
  } else {
    # Close the final subquery with its ORDER BY clause.
    query <- paste0(query, " order by date,hits.hour, hits.minute;")
    query_dia <- paste0(query_dia, " order by date;")
    query_brand <- paste0(query_brand, " order by date,hits.hour, hits.minute;")
  }
}
# Crear tabla full
# SELECT date, hits_hour,hits_minute, visits, t1.newvisits newvisits, t1.transactions transactions, t2.brandvisits as brandvisits
# from [golden-passkey-615:export.col_30jun_24ago_act] as t1
# join [golden-passkey-615:export.col_30jun_24ago_brand] as t2
# on t1.date = t2.fecha and t1.hits_hour = t2.hora and t1.hits_minute = t2.minute
# order by date,hits_hour, hits_minute;
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cell_data_set.R
\name{new_cell_data_set}
\alias{new_cell_data_set}
\title{Create a new cell_data_set object.}
\usage{
new_cell_data_set(expression_data, cell_metadata = NULL, gene_metadata = NULL)
}
\arguments{
\item{expression_data}{expression data matrix for an experiment, can be a
sparseMatrix.}
\item{cell_metadata}{data frame containing attributes of individual cells,
where \code{row.names(cell_metadata) = colnames(expression_data)}.}
\item{gene_metadata}{data frame containing attributes of features
(e.g. genes), where
\code{row.names(gene_metadata) = row.names(expression_data)}.}
}
\value{
a new cell_data_set object
}
\description{
Create a new cell_data_set object.
}
\examples{
small_a549_colData_df <- readRDS(system.file("extdata",
"small_a549_dex_pdata.rda",
package = "monocle3"))
small_a549_rowData_df <- readRDS(system.file("extdata",
"small_a549_dex_fdata.rda",
package = "monocle3"))
small_a549_exprs <- readRDS(system.file("extdata",
"small_a549_dex_exprs.rda",
package = "monocle3"))
small_a549_exprs <- small_a549_exprs[,row.names(small_a549_colData_df)]
cds <- new_cell_data_set(expression_data = small_a549_exprs,
cell_metadata = small_a549_colData_df,
gene_metadata = small_a549_rowData_df)
}
| /man/new_cell_data_set.Rd | permissive | cole-trapnell-lab/monocle3 | R | false | true | 1,640 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cell_data_set.R
\name{new_cell_data_set}
\alias{new_cell_data_set}
\title{Create a new cell_data_set object.}
\usage{
new_cell_data_set(expression_data, cell_metadata = NULL, gene_metadata = NULL)
}
\arguments{
\item{expression_data}{expression data matrix for an experiment, can be a
sparseMatrix.}
\item{cell_metadata}{data frame containing attributes of individual cells,
where \code{row.names(cell_metadata) = colnames(expression_data)}.}
\item{gene_metadata}{data frame containing attributes of features
(e.g. genes), where
\code{row.names(gene_metadata) = row.names(expression_data)}.}
}
\value{
a new cell_data_set object
}
\description{
Create a new cell_data_set object.
}
\examples{
small_a549_colData_df <- readRDS(system.file("extdata",
"small_a549_dex_pdata.rda",
package = "monocle3"))
small_a549_rowData_df <- readRDS(system.file("extdata",
"small_a549_dex_fdata.rda",
package = "monocle3"))
small_a549_exprs <- readRDS(system.file("extdata",
"small_a549_dex_exprs.rda",
package = "monocle3"))
small_a549_exprs <- small_a549_exprs[,row.names(small_a549_colData_df)]
cds <- new_cell_data_set(expression_data = small_a549_exprs,
cell_metadata = small_a549_colData_df,
gene_metadata = small_a549_rowData_df)
}
|
#' Creates an R Markdown PDF Thesis document
#'
#' This is a function called in output in the YAML of the driver Rmd file
#' to specify the LaTeX template and cls files.
#'
#' @param toc A Boolean (TRUE or FALSE) specifying whether table of contents
#' should be created
#' @param toc_depth A positive integer
#' @param highlight Syntax highlighting style. Supported styles include
#' "default", "tango", "pygments", "kate", "monochrome", "espresso", "zenburn",
#' and "haddock". Pass NULL to prevent syntax highlighting.
#' @param ... additional arguments passed to \code{bookdown::pdf_book}
#'
#' @return A modified \code{pdf_document} based on the Reed Senior Thesis LaTeX
#' template
#'
#' @examples
#' \dontrun{
#' output: hopkinsdown::thesis_pdf
#' }
#'
#' @export
thesis_pdf <- function(toc = TRUE, toc_depth = 3, highlight = "default", ...) {
  # Base PDF format from bookdown; keep_tex preserves the intermediate .tex
  # for debugging, and top-level sections render as LaTeX chapters.
  base <- bookdown::pdf_book(
    toc = toc,
    toc_depth = toc_depth,
    highlight = highlight,
    keep_tex = TRUE,
    pandoc_args = "--top-level-division=chapter",
    ...)
  # Mostly copied from knitr::render_sweave
  # Suppress the "##" prefix on echoed chunk output.
  base$knitr$opts_chunk$comment <- NA
  #base$knitr$opts_chunk$fig.align <- "center"
  # Install fix_envs (defined below in this file) as bookdown's post-LaTeX hook.
  old_opt <- getOption("bookdown.post.latex")
  options(bookdown.post.latex = fix_envs)
  # NOTE(review): the name below, "bookdown.post.late", is missing the final
  # "x", so this does NOT restore the option set above. If the spelling were
  # "corrected", the hook would be reset as soon as this function returns --
  # before bookdown ever runs fix_envs during rendering -- so the misspelling
  # is what keeps the hook active. Confirm intent before changing it.
  on.exit(options(bookdown.post.late = old_opt))
  base
}
#' Creates an R Markdown gitbook Thesis document
#'
#' This is a function called in output in the YAML of the driver Rmd file
#' to specify the creation of a webpage version of the thesis.
#'
#' @param ... additional arguments passed to \code{bookdown::gitbook}
#' @export
#' @return A gitbook webpage
#' @examples
#' \dontrun{
#' output: hopkinsdown::thesis_gitbook
#' }
thesis_gitbook <- function(...){
  # Base gitbook format: one page per numbered chapter, with a custom
  # table-of-contents header link and "published with bookdown" footer.
  base <- bookdown::gitbook(
    split_by = "chapter+number",
    config = list(toc = list(collapse = "section",
                             before = '<li><a href="./"></a></li>',
                             after = '<li><a href="https://github.com/rstudio/bookdown" target="blank">Published with bookdown</a></li>')),
    # Bug fix: `...` was previously spliced inside the `toc` list, so any
    # extra arguments a caller supplied never reached bookdown::gitbook().
    # Forward them to gitbook() itself instead.
    ...
  )
  # Mostly copied from knitr::render_sweave
  base$knitr$opts_chunk$comment <- NA
  base$knitr$opts_chunk$fig.align <- "center"
  base
}
#' Creates an R Markdown Word Thesis document
#'
#' This is a function called in output in the YAML of the driver Rmd file
#' to specify the creation of a Microsoft Word version of the thesis.
#'
#' @param ... additional arguments passed to \code{bookdown::word_document2}
#' @export
#' @return A Word Document based on (hopefully soon, but not currently)
#' the Reed Senior Thesis Word template
#' @examples
#' \dontrun{
#' output: hopkinsdown::thesis_word
#' }
thesis_word <- function(...){
  # Delegate to bookdown's Word format, forwarding every user option.
  fmt <- bookdown::word_document2(...)
  # Chunk-option tweaks (mostly copied from knitr::render_sweave):
  # center figures and drop the "##" prefix on echoed chunk output.
  fmt$knitr$opts_chunk$fig.align <- "center"
  fmt$knitr$opts_chunk$comment <- NA
  fmt
}
#' Creates an R Markdown epub Thesis document
#'
#' This is a function called in output in the YAML of the driver Rmd file
#' to specify the creation of a epub version of the thesis.
#'
#' @param ... additional arguments passed to \code{bookdown::epub_book}
#' @export
#' @return A ebook version of the thesis
#' @examples
#' \dontrun{
#' output: hopkinsdown::thesis_epub
#' }
thesis_epub <- function(...){
  # Delegate to bookdown's EPUB format, forwarding every user option.
  fmt <- bookdown::epub_book(...)
  # Chunk-option tweaks (mostly copied from knitr::render_sweave):
  # center figures and drop the "##" prefix on echoed chunk output.
  fmt$knitr$opts_chunk$fig.align <- "center"
  fmt$knitr$opts_chunk$comment <- NA
  fmt
}
fix_envs <- function(x) {
  # Strip blank lines butting up against LaTeX environments: a blank line
  # directly above a \begin{...} or directly below an \end{...} would make
  # LaTeX open a new paragraph around the environment.
  beg_reg <- "^\\s*\\\\begin\\{.*\\}"
  end_reg <- "^\\s*\\\\end\\{.*\\}"
  drop_idx <- NULL
  begins <- grep(beg_reg, x)
  if (length(begins)) {
    drop_idx <- (begins - 1)[grepl("^\\s*$", x[begins - 1])]
  }
  ends <- grep(end_reg, x)
  if (length(ends)) {
    drop_idx <- c(drop_idx, (ends + 1)[grepl("^\\s*$", x[ends + 1])])
  }
  if (length(drop_idx)) {
    x <- x[-drop_idx]
  }
  x
}
| /R/thesis.R | permissive | Zaxim/hopkinsdown | R | false | false | 3,730 | r | #' Creates an R Markdown PDF Thesis document
#'
#' This is a function called in output in the YAML of the driver Rmd file
#' to specify the LaTeX template and cls files.
#'
#' @param toc A Boolean (TRUE or FALSE) specifying whether table of contents
#' should be created
#' @param toc_depth A positive integer
#' @param highlight Syntax highlighting style. Supported styles include
#' "default", "tango", "pygments", "kate", "monochrome", "espresso", "zenburn",
#' and "haddock". Pass NULL to prevent syntax highlighting.
#' @param ... additional arguments passed to \code{bookdown::pdf_book}
#'
#' @return A modified \code{pdf_document} based on the Reed Senior Thesis LaTeX
#' template
#'
#' @examples
#' \dontrun{
#' output: hopkinsdown::thesis_pdf
#' }
#'
#' @export
thesis_pdf <- function(toc = TRUE, toc_depth = 3, highlight = "default", ...) {
  # Base PDF format from bookdown; keep_tex preserves the intermediate .tex
  # for debugging, and top-level sections render as LaTeX chapters.
  base <- bookdown::pdf_book(
    toc = toc,
    toc_depth = toc_depth,
    highlight = highlight,
    keep_tex = TRUE,
    pandoc_args = "--top-level-division=chapter",
    ...)
  # Mostly copied from knitr::render_sweave
  # Suppress the "##" prefix on echoed chunk output.
  base$knitr$opts_chunk$comment <- NA
  #base$knitr$opts_chunk$fig.align <- "center"
  # Install fix_envs (defined below in this file) as bookdown's post-LaTeX hook.
  old_opt <- getOption("bookdown.post.latex")
  options(bookdown.post.latex = fix_envs)
  # NOTE(review): the name below, "bookdown.post.late", is missing the final
  # "x", so this does NOT restore the option set above. If the spelling were
  # "corrected", the hook would be reset as soon as this function returns --
  # before bookdown ever runs fix_envs during rendering -- so the misspelling
  # is what keeps the hook active. Confirm intent before changing it.
  on.exit(options(bookdown.post.late = old_opt))
  base
}
#' Creates an R Markdown gitbook Thesis document
#'
#' This is a function called in output in the YAML of the driver Rmd file
#' to specify the creation of a webpage version of the thesis.
#'
#' @param ... additional arguments passed to \code{bookdown::gitbook}
#' @export
#' @return A gitbook webpage
#' @examples
#' \dontrun{
#' output: hopkinsdown::thesis_gitbook
#' }
thesis_gitbook <- function(...){
  # Base gitbook format: one page per numbered chapter, with a custom
  # table-of-contents header link and "published with bookdown" footer.
  base <- bookdown::gitbook(
    split_by = "chapter+number",
    config = list(toc = list(collapse = "section",
                             before = '<li><a href="./"></a></li>',
                             after = '<li><a href="https://github.com/rstudio/bookdown" target="blank">Published with bookdown</a></li>')),
    # Bug fix: `...` was previously spliced inside the `toc` list, so any
    # extra arguments a caller supplied never reached bookdown::gitbook().
    # Forward them to gitbook() itself instead.
    ...
  )
  # Mostly copied from knitr::render_sweave
  base$knitr$opts_chunk$comment <- NA
  base$knitr$opts_chunk$fig.align <- "center"
  base
}
#' Creates an R Markdown Word Thesis document
#'
#' This is a function called in output in the YAML of the driver Rmd file
#' to specify the creation of a Microsoft Word version of the thesis.
#'
#' @param ... additional arguments passed to \code{bookdown::word_document2}
#' @export
#' @return A Word Document based on (hopefully soon, but not currently)
#' the Reed Senior Thesis Word template
#' @examples
#' \dontrun{
#' output: hopkinsdown::thesis_word
#' }
thesis_word <- function(...){
  # Delegate to bookdown's Word format, forwarding every user option.
  fmt <- bookdown::word_document2(...)
  # Chunk-option tweaks (mostly copied from knitr::render_sweave):
  # center figures and drop the "##" prefix on echoed chunk output.
  fmt$knitr$opts_chunk$fig.align <- "center"
  fmt$knitr$opts_chunk$comment <- NA
  fmt
}
#' Creates an R Markdown epub Thesis document
#'
#' This is a function called in output in the YAML of the driver Rmd file
#' to specify the creation of a epub version of the thesis.
#'
#' @param ... additional arguments passed to \code{bookdown::epub_book}
#' @export
#' @return A ebook version of the thesis
#' @examples
#' \dontrun{
#' output: hopkinsdown::thesis_epub
#' }
thesis_epub <- function(...){
  # Delegate to bookdown's EPUB format, forwarding every user option.
  fmt <- bookdown::epub_book(...)
  # Chunk-option tweaks (mostly copied from knitr::render_sweave):
  # center figures and drop the "##" prefix on echoed chunk output.
  fmt$knitr$opts_chunk$fig.align <- "center"
  fmt$knitr$opts_chunk$comment <- NA
  fmt
}
fix_envs <- function(x) {
  # Strip blank lines butting up against LaTeX environments: a blank line
  # directly above a \begin{...} or directly below an \end{...} would make
  # LaTeX open a new paragraph around the environment.
  beg_reg <- "^\\s*\\\\begin\\{.*\\}"
  end_reg <- "^\\s*\\\\end\\{.*\\}"
  drop_idx <- NULL
  begins <- grep(beg_reg, x)
  if (length(begins)) {
    drop_idx <- (begins - 1)[grepl("^\\s*$", x[begins - 1])]
  }
  ends <- grep(end_reg, x)
  if (length(ends)) {
    drop_idx <- c(drop_idx, (ends + 1)[grepl("^\\s*$", x[ends + 1])])
  }
  if (length(drop_idx)) {
    x <- x[-drop_idx]
  }
  x
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## This function creates a special "matrix" object that can cache its inverse.
## Wrap a matrix in a list of closures that can memoise its inverse.
## Accessors: set/get replace and read the matrix; setsolve/getsolve
## store and retrieve the cached inverse (NULL until it is computed).
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # memoised inverse; reset whenever the matrix changes
  set <- function(y) {
    x <<- y
    cached <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  get <- function() x
  setsolve <- function(solve) cached <<- solve
  getsolve <- function() cached
  list(set = set,
       get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## Write a short comment describing this function
## Return a matrix that is the inverse of 'x'
## This function computes the inverse of the special "matrix" returned
## by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve
## the inverse from the cache.
## Return the inverse of the special "matrix" built by makeCacheMatrix.
## If a cached inverse exists it is returned directly; otherwise the
## inverse is computed with solve(), stored in the cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Bug fix: the original read the cache into `m` but then tested and
  ## returned an undefined variable `s` (and cached `s` instead of the
  ## freshly computed inverse), so it errored -- or silently leaked state
  ## from the global environment -- on every call.
  inv <- x$getsolve()
  if (!is.null(inv)) {
    message("getting inversed matrix")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setsolve(inv)
  inv
}
| /cachematrix.R | no_license | IvnOz/ProgrammingAssignment2 | R | false | false | 1,092 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## This function creates a special "matrix" object that can cache its inverse.
## Wrap a matrix in a list of closures that can memoise its inverse.
## Accessors: set/get replace and read the matrix; setsolve/getsolve
## store and retrieve the cached inverse (NULL until it is computed).
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # memoised inverse; reset whenever the matrix changes
  set <- function(y) {
    x <<- y
    cached <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  get <- function() x
  setsolve <- function(solve) cached <<- solve
  getsolve <- function() cached
  list(set = set,
       get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## Write a short comment describing this function
## Return a matrix that is the inverse of 'x'
## This function computes the inverse of the special "matrix" returned
## by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve
## the inverse from the cache.
## Return the inverse of the special "matrix" built by makeCacheMatrix.
## If a cached inverse exists it is returned directly; otherwise the
## inverse is computed with solve(), stored in the cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Bug fix: the original read the cache into `m` but then tested and
  ## returned an undefined variable `s` (and cached `s` instead of the
  ## freshly computed inverse), so it errored -- or silently leaked state
  ## from the global environment -- on every call.
  inv <- x$getsolve()
  if (!is.null(inv)) {
    message("getting inversed matrix")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setsolve(inv)
  inv
}
|
# Author : Marion Chevrier
# Date : 30/09/2019
# Proj : Run LIGER pipeline
########################
#load packages
#devtools::install_github('MacoskoLab/liger')
library(scales)
library(liger)
library(Matrix)
library(Rtsne)
library(ggplot2)
# Start from a clean workspace so no stale objects leak into the run.
rm(list=ls())
########################
#settings
# var.thresh: variable-gene threshold forwarded to liger_preprocess below;
# k / nrep: factorisation rank and restart count forwarded to call_liger.
var.thresh = 0.1
k = 20
nrep = 3
visualize = T
outfile_prefix = "Dataset7"
save_obj = F
# Paths are relative to this script's working directory.
src_dir = "./"
working_dir = "../../Output/"
read_dir = "../../Data/dataset7/"
b1_exprs_filename = "b1_exprs.txt"
b2_exprs_filename = "b2_exprs.txt"
b1_celltype_filename = "b1_celltype.txt"
b2_celltype_filename = "b2_celltype.txt"
# Metadata column names consumed by the pipeline functions.
batch_label = "batchlb"
celltype_label = "CellType"
########################
# read data
# Tab-separated tables with a header row and row names in column 1.
# The expression tables presumably have cells as columns (the celltype
# tables are reindexed by those column names below) -- TODO confirm layout.
b1_exprs <- read.table(file = paste0(read_dir,b1_exprs_filename),sep="\t",header=T,row.names=1,check.names = F)
b2_exprs <- read.table(file = paste0(read_dir,b2_exprs_filename),sep="\t",header=T,row.names=1,check.names = F)
b1_celltype <- read.table(file = paste0(read_dir,b1_celltype_filename),sep="\t",header=T,row.names=1,check.names = F)
b2_celltype <- read.table(file = paste0(read_dir,b2_celltype_filename),sep="\t",header=T,row.names=1,check.names = F)
# Keep the cell id as an explicit column, then align the annotation rows
# with the expression matrix column order.
b1_celltype$cell <- rownames(b1_celltype)
b1_celltype <- b1_celltype[colnames(b1_exprs),]
b2_celltype$cell <- rownames(b2_celltype)
b2_celltype <- b2_celltype[colnames(b2_exprs),]
b1_metadata <- as.data.frame(b1_celltype)
b2_metadata <- as.data.frame(b2_celltype)
# Tag each cell with its batch, both as a number and as a readable label.
b1_metadata$batch <- 1
b2_metadata$batch <- 2
b1_metadata$batchlb <- 'Batch_1'
b2_metadata$batchlb <- 'Batch_2'
# Combined expression matrix (both batches side by side) plus matching
# metadata; columns are reordered to follow the metadata row order.
expr_mat = cbind(b1_exprs,b2_exprs)
metadata = rbind(b1_metadata, b2_metadata)
expr_mat <- expr_mat[, rownames(metadata)]
########################
# run pipeline
# call_liger.R (in src_dir) defines liger_preprocess() and call_liger().
source(paste0(src_dir,'call_liger.R'))
liger_obj <- liger_preprocess(expr_mat, metadata,
                   var.thresh=var.thresh,
                   batch_label = batch_label)
call_liger(liger_obj, metadata, batch_label, celltype_label, k = k, nrep = nrep,
           plotout_dir = working_dir, saveout_dir = working_dir, outfilename_prefix = outfile_prefix, visualize = visualize, save_obj = save_obj)
| /Script/LIGER/run_liger_07.R | no_license | JinmiaoChenLab/Batch-effect-removal-benchmarking | R | false | false | 2,143 | r |
# Author : Marion Chevrier
# Date : 30/09/2019
# Proj : Run LIGER pipeline
########################
#load packages
#devtools::install_github('MacoskoLab/liger')
library(scales)
library(liger)
library(Matrix)
library(Rtsne)
library(ggplot2)
# Start from a clean workspace so no stale objects leak into the run.
rm(list=ls())
########################
#settings
# var.thresh: variable-gene threshold forwarded to liger_preprocess below;
# k / nrep: factorisation rank and restart count forwarded to call_liger.
var.thresh = 0.1
k = 20
nrep = 3
visualize = T
outfile_prefix = "Dataset7"
save_obj = F
# Paths are relative to this script's working directory.
src_dir = "./"
working_dir = "../../Output/"
read_dir = "../../Data/dataset7/"
b1_exprs_filename = "b1_exprs.txt"
b2_exprs_filename = "b2_exprs.txt"
b1_celltype_filename = "b1_celltype.txt"
b2_celltype_filename = "b2_celltype.txt"
# Metadata column names consumed by the pipeline functions.
batch_label = "batchlb"
celltype_label = "CellType"
########################
# read data
# Tab-separated tables with a header row and row names in column 1.
# The expression tables presumably have cells as columns (the celltype
# tables are reindexed by those column names below) -- TODO confirm layout.
b1_exprs <- read.table(file = paste0(read_dir,b1_exprs_filename),sep="\t",header=T,row.names=1,check.names = F)
b2_exprs <- read.table(file = paste0(read_dir,b2_exprs_filename),sep="\t",header=T,row.names=1,check.names = F)
b1_celltype <- read.table(file = paste0(read_dir,b1_celltype_filename),sep="\t",header=T,row.names=1,check.names = F)
b2_celltype <- read.table(file = paste0(read_dir,b2_celltype_filename),sep="\t",header=T,row.names=1,check.names = F)
# Keep the cell id as an explicit column, then align the annotation rows
# with the expression matrix column order.
b1_celltype$cell <- rownames(b1_celltype)
b1_celltype <- b1_celltype[colnames(b1_exprs),]
b2_celltype$cell <- rownames(b2_celltype)
b2_celltype <- b2_celltype[colnames(b2_exprs),]
b1_metadata <- as.data.frame(b1_celltype)
b2_metadata <- as.data.frame(b2_celltype)
# Tag each cell with its batch, both as a number and as a readable label.
b1_metadata$batch <- 1
b2_metadata$batch <- 2
b1_metadata$batchlb <- 'Batch_1'
b2_metadata$batchlb <- 'Batch_2'
# Combined expression matrix (both batches side by side) plus matching
# metadata; columns are reordered to follow the metadata row order.
expr_mat = cbind(b1_exprs,b2_exprs)
metadata = rbind(b1_metadata, b2_metadata)
expr_mat <- expr_mat[, rownames(metadata)]
########################
# run pipeline
# call_liger.R (in src_dir) defines liger_preprocess() and call_liger().
source(paste0(src_dir,'call_liger.R'))
liger_obj <- liger_preprocess(expr_mat, metadata,
                   var.thresh=var.thresh,
                   batch_label = batch_label)
call_liger(liger_obj, metadata, batch_label, celltype_label, k = k, nrep = nrep,
           plotout_dir = working_dir, saveout_dir = working_dir, outfilename_prefix = outfile_prefix, visualize = visualize, save_obj = save_obj)
|
# Draw the numbers 1..10 in random order; size = 5 means five values are
# drawn. replace = FALSE forbids re-drawing a value already used, while
# replace = TRUE allows repeats. set.seed() pins the RNG so the "random"
# draw is reproducible.
set.seed(121)  # the seed is the key that selects the random sequence
a <- sample(1:10, size = 5, replace = FALSE)
a
# ifelse() plays the role of an if statement over whole vectors.
set.seed(1221)
ifdf <- data.frame(mynum = 1:6,
                   myval = sample(c("spring", "bigdata"),
                                  size = 6,
                                  replace = TRUE))
ifdf
# info: "프로젝트완료" when myval is "spring", otherwise "할꺼야".
for (i in seq_len(nrow(ifdf))) {   # seq_len() is safe even for 0-row frames
  if (ifdf[i, "myval"] == "spring") {
    ifdf[i, "info"] <- "프로젝트완료"
  } else {
    ifdf[i, "info"] <- "할꺼야"
  }
}
ifdf
# Same idea with the vectorized ifelse() (like Excel's IF) -> info2.
ifdf[, "info2"] <- ifelse(test = ifdf$myval == "spring",
                          yes = "쉽다",
                          no = "할꺼다")
ifdf
# Handling more than one condition by nesting ifelse() calls.
# Bug fix: the inner ifelse() used to be closed before its `no=` argument,
# leaving the outer call with two `no=` arguments and an unbalanced
# parenthesis -- the statement could not run at all.
ifdf[, "info3"] <- ifelse(test = ifdf$myval == "spring",
                          yes = "쉽다",
                          no = ifelse(test = ifdf$myval == "bigdata",
                                      yes = "머신셋팅",
                                      no = "device셋팅완료"))
ifdf
# (The commented-out info4 example below repeated the same mistake.)
#ifdf[,"info4"] <- "쉽다"
#ifdf
#ifdf[,"info4"] <- ifelse(test=ifdf$myval=="spring",
#                         yes="쉽다",
#                         no=ifelse(test=ifdf$myval=="bigdata",
#                                   yes = "머신셋팅",
#                                   no="device셋팅완료"))
#ifdf
| /advanced/ifelse_func.R | no_license | junes7/RWork | R | false | false | 1,867 | r | #1부터 10까지 순서가 랜덤하게 숫자가 발생되어 들어간다.
# size = 5 means five values are drawn. replace = FALSE forbids re-drawing
# a value already used, while replace = TRUE allows repeats. set.seed()
# pins the RNG so the "random" draw is reproducible.
set.seed(121)  # the seed is the key that selects the random sequence
a <- sample(1:10, size = 5, replace = FALSE)
a
# ifelse() plays the role of an if statement over whole vectors.
set.seed(1221)
ifdf <- data.frame(mynum = 1:6,
                   myval = sample(c("spring", "bigdata"),
                                  size = 6,
                                  replace = TRUE))
ifdf
# info: "프로젝트완료" when myval is "spring", otherwise "할꺼야".
for (i in seq_len(nrow(ifdf))) {   # seq_len() is safe even for 0-row frames
  if (ifdf[i, "myval"] == "spring") {
    ifdf[i, "info"] <- "프로젝트완료"
  } else {
    ifdf[i, "info"] <- "할꺼야"
  }
}
ifdf
# Same idea with the vectorized ifelse() (like Excel's IF) -> info2.
ifdf[, "info2"] <- ifelse(test = ifdf$myval == "spring",
                          yes = "쉽다",
                          no = "할꺼다")
ifdf
# Handling more than one condition by nesting ifelse() calls.
# Bug fix: the inner ifelse() used to be closed before its `no=` argument,
# leaving the outer call with two `no=` arguments and an unbalanced
# parenthesis -- the statement could not run at all.
ifdf[, "info3"] <- ifelse(test = ifdf$myval == "spring",
                          yes = "쉽다",
                          no = ifelse(test = ifdf$myval == "bigdata",
                                      yes = "머신셋팅",
                                      no = "device셋팅완료"))
ifdf
# (The commented-out info4 example below repeated the same mistake.)
#ifdf[,"info4"] <- "쉽다"
#ifdf
#ifdf[,"info4"] <- ifelse(test=ifdf$myval=="spring",
#                         yes="쉽다",
#                         no=ifelse(test=ifdf$myval=="bigdata",
#                                   yes = "머신셋팅",
#                                   no="device셋팅완료"))
#ifdf
|
# Draw n samples from Exp(lambda)
n <- 100
lambda <- 2
x <- rexp(n, lambda)
# Sufficient statistic for lambda: the sample total
# (renamed from `t`, which shadowed base::t)
total <- sum(x)
# Draw n samples from Exp(1)
u <- rexp(n, 1)
# Estimate the parameter lambda
lambdahat <- sum(u) / total
# Conditional sample given the sufficient statistic
xt <- u / lambdahat
ks.test(xt, x)
#High p-value suggests that the initial sample x and
#the conditional sample xt are drawn from the same distribution | /Algorithm1exp.R | no_license | rasmuserlemann/Conditional-Monte-Carlo | R | false | false | 372 | r | #Draw n samples from exp(lambda)
n <- 100
lambda <- 2
# Draw n samples from Exp(lambda)
x <- rexp(n, lambda)
# Sufficient statistic for lambda: the sample total
# (renamed from `t`, which shadowed base::t)
total <- sum(x)
# Draw n samples from Exp(1)
u <- rexp(n, 1)
# Estimate the parameter lambda
lambdahat <- sum(u) / total
# Conditional sample given the sufficient statistic
xt <- u / lambdahat
ks.test(xt, x)
#High p-value suggests that the initial sample x and
#the conditional sample xt are drawn from the same distribution
vvarb <-
function(var, varb, N, n) {
  # Approximate sampling variance of a between-group variance component.
  # var: total variance; varb: between-group component; N: number of
  # groups; n: group size. The within-group component is var - varb.
  vare <- var - varb
  (2 / (N - 1)) * (varb^2 + 2 * varb * vare / n + vare^2 / (n * (n - 1)))
}
| /R/vvarb.R | no_license | cran/SE.IGE | R | false | false | 134 | r | vvarb <-
function(var,varb,N,n) {
# vare: within-group (residual) variance component implied by the inputs.
vare <- var-varb
# Approximate sampling variance of the between-group variance estimate varb,
# for N groups of size n.
return((2/(N-1))*(varb^2 + 2*varb*vare/n + vare^2/(n*(n-1))))
}
|
## This script creates a tidy data set of UCI HAR Data Set
## Pipeline: read the train/test splits, replace numeric activity codes
## with names, keep only the mean/std features, merge everything into one
## tidy table, then write out that table and the per-subject/per-activity
## averages.
library(dplyr)
library(stringr)
# NOTE(review): "C:..." is a placeholder, and the single backslash in
# "\getting" is not a valid R string escape, so this line cannot parse
# as-is; the path needs doubled backslashes (or forward slashes) throughout.
setwd("C:...\getting and cleaning data\\Assignement 4\\UCI HAR Dataset")
# Read training data into a train file
trainFileName <- "train\\X_train.txt"
trainFile<- read.table(trainFileName)
# read test data into a test file
testFileName <- "test\\X_test.txt"
testFile<- read.table(testFileName)
# read training data labels
trainLabelFileName <- "train\\y_train.txt"
trainLabelFile <- read.table(trainLabelFileName, colClasses = "character")
# read test data labels
testLabelFileName <- "test\\y_test.txt"
testLabelFile <- read.table(testLabelFileName, colClasses = "character")
# read the train subject file
trainSubjectFileName <- "train\\subject_train.txt"
trainSubjectFile <- read.table(trainSubjectFileName)
# read the test subject file
testSubjectFileName <- "test\\subject_test.txt"
testSubjectFile <- read.table(testSubjectFileName)
subjectFile <- rbind(trainSubjectFile,testSubjectFile)
# read the feature info file
featureFileName <- "features.txt"
featureFile <- read.table(featureFileName,colClasses = c("numeric","character"),col.names = c("srNo","featureName"))
# read the activity label file
activityLabelFileName <- "activity_labels.txt"
activityLabelFile <- read.table(activityLabelFileName,colClasses = c("numeric","character"),col.names = c("srNo", "activityName"))
# Replace activity codes "1".."6" with their descriptive names. Assumes
# activity_labels.txt lists the six activities in code order -- TODO confirm.
trainLabelFile$V1 <- str_replace_all(trainLabelFile$V1,c("1"=activityLabelFile$activityName[1],
                                                         "2"=activityLabelFile$activityName[2],
                                                         "3"=activityLabelFile$activityName[3],
                                                         "4"=activityLabelFile$activityName[4],
                                                         "5"=activityLabelFile$activityName[5],
                                                         "6"=activityLabelFile$activityName[6]));
testLabelFile$V1 <- str_replace_all(testLabelFile$V1,c("1"=activityLabelFile$activityName[1],
                                                       "2"=activityLabelFile$activityName[2],
                                                       "3"=activityLabelFile$activityName[3],
                                                       "4"=activityLabelFile$activityName[4],
                                                       "5"=activityLabelFile$activityName[5],
                                                       "6"=activityLabelFile$activityName[6]));
activityLabels <- rbind(trainLabelFile,testLabelFile)
# Select the mean features
meanFeatureIndices <- grep("mean", featureFile$featureName)
# select the std features
stdFeatureIndices <- grep("std", featureFile$featureName)
selectedFeatureIndices <- c(meanFeatureIndices, stdFeatureIndices)
# create cleaned vectors of these feature indices
meanFeatureNames <- featureFile[meanFeatureIndices,]
stdFeatureNames <- featureFile[stdFeatureIndices,]
# Clean the feature Names
# Strip "-", "(" and ")" so the names become plain, readable column names.
meanFeatureNames$featureName <- gsub("-|\\(|\\)","",meanFeatureNames$featureName)
stdFeatureNames$featureName <- gsub("-|\\(|\\)","",stdFeatureNames$featureName)
cleanedFeatureNames <- rbind(meanFeatureNames, stdFeatureNames)
# select the sub set of columns form the test and train data set
targetTrainData <- trainFile[,selectedFeatureIndices]
targetTestData <- testFile[,selectedFeatureIndices]
targetDataSet <- rbind(targetTrainData,targetTestData)
# Append the subject Id and activity Labels in the target data set
targetDataSet <- cbind(subjectFile, activityLabels, targetDataSet)
# Assign the column names of the data frame
colnames(targetDataSet)<- c("subjectID","activityLabel",cleanedFeatureNames$featureName)
# NOTE(review): the output name below ends in a bare "." -- most likely
# "HARTidyDataSet.txt" was intended; confirm before relying on the file.
write.table(targetDataSet, file = "HARTidyDataSet.")
# Second tidy set: mean of every variable per subject and activity.
averageDataset <- aggregate(. ~subjectID + activityLabel, targetDataSet, mean)
averageDataset <- averageDataset[order(averageDataset$subjectID, averageDataset$activityLabel),]
write.table(averageDataset, file = "averageDatasetBySubjectAndActivity.txt")
| /run_analysis.R | no_license | rasikaWagh/Getting_and_cleaning_data_course_project | R | false | false | 4,039 | r | ## This script creates a tidy data set of UCI HAR Data Set
## Pipeline: read the train/test splits, replace numeric activity codes
## with names, keep only the mean/std features, merge everything into one
## tidy table, then write out that table and the per-subject/per-activity
## averages.
library(dplyr)
library(stringr)
# NOTE(review): "C:..." is a placeholder, and the single backslash in
# "\getting" is not a valid R string escape, so this line cannot parse
# as-is; the path needs doubled backslashes (or forward slashes) throughout.
setwd("C:...\getting and cleaning data\\Assignement 4\\UCI HAR Dataset")
# Read training data into a train file
trainFileName <- "train\\X_train.txt"
trainFile<- read.table(trainFileName)
# read test data into a test file
testFileName <- "test\\X_test.txt"
testFile<- read.table(testFileName)
# read training data labels
trainLabelFileName <- "train\\y_train.txt"
trainLabelFile <- read.table(trainLabelFileName, colClasses = "character")
# read test data labels
testLabelFileName <- "test\\y_test.txt"
testLabelFile <- read.table(testLabelFileName, colClasses = "character")
# read the train subject file
trainSubjectFileName <- "train\\subject_train.txt"
trainSubjectFile <- read.table(trainSubjectFileName)
# read the test subject file
testSubjectFileName <- "test\\subject_test.txt"
testSubjectFile <- read.table(testSubjectFileName)
subjectFile <- rbind(trainSubjectFile,testSubjectFile)
# read the feature info file
featureFileName <- "features.txt"
featureFile <- read.table(featureFileName,colClasses = c("numeric","character"),col.names = c("srNo","featureName"))
# read the activity label file
activityLabelFileName <- "activity_labels.txt"
activityLabelFile <- read.table(activityLabelFileName,colClasses = c("numeric","character"),col.names = c("srNo", "activityName"))
# Replace activity codes "1".."6" with their descriptive names. Assumes
# activity_labels.txt lists the six activities in code order -- TODO confirm.
trainLabelFile$V1 <- str_replace_all(trainLabelFile$V1,c("1"=activityLabelFile$activityName[1],
                                                         "2"=activityLabelFile$activityName[2],
                                                         "3"=activityLabelFile$activityName[3],
                                                         "4"=activityLabelFile$activityName[4],
                                                         "5"=activityLabelFile$activityName[5],
                                                         "6"=activityLabelFile$activityName[6]));
testLabelFile$V1 <- str_replace_all(testLabelFile$V1,c("1"=activityLabelFile$activityName[1],
                                                       "2"=activityLabelFile$activityName[2],
                                                       "3"=activityLabelFile$activityName[3],
                                                       "4"=activityLabelFile$activityName[4],
                                                       "5"=activityLabelFile$activityName[5],
                                                       "6"=activityLabelFile$activityName[6]));
activityLabels <- rbind(trainLabelFile,testLabelFile)
# Select the mean features
meanFeatureIndices <- grep("mean", featureFile$featureName)
# select the std features
stdFeatureIndices <- grep("std", featureFile$featureName)
selectedFeatureIndices <- c(meanFeatureIndices, stdFeatureIndices)
# create cleaned vectors of these feature indices
meanFeatureNames <- featureFile[meanFeatureIndices,]
stdFeatureNames <- featureFile[stdFeatureIndices,]
# Clean the feature Names
# Strip "-", "(" and ")" so the names become plain, readable column names.
meanFeatureNames$featureName <- gsub("-|\\(|\\)","",meanFeatureNames$featureName)
stdFeatureNames$featureName <- gsub("-|\\(|\\)","",stdFeatureNames$featureName)
cleanedFeatureNames <- rbind(meanFeatureNames, stdFeatureNames)
# select the sub set of columns form the test and train data set
targetTrainData <- trainFile[,selectedFeatureIndices]
targetTestData <- testFile[,selectedFeatureIndices]
targetDataSet <- rbind(targetTrainData,targetTestData)
# Append the subject Id and activity Labels in the target data set
targetDataSet <- cbind(subjectFile, activityLabels, targetDataSet)
# Assign the column names of the data frame
colnames(targetDataSet)<- c("subjectID","activityLabel",cleanedFeatureNames$featureName)
# NOTE(review): the output name below ends in a bare "." -- most likely
# "HARTidyDataSet.txt" was intended; confirm before relying on the file.
write.table(targetDataSet, file = "HARTidyDataSet.")
# Second tidy set: mean of every variable per subject and activity.
averageDataset <- aggregate(. ~subjectID + activityLabel, targetDataSet, mean)
averageDataset <- averageDataset[order(averageDataset$subjectID, averageDataset$activityLabel),]
write.table(averageDataset, file = "averageDatasetBySubjectAndActivity.txt")
|
# Shiny UI definition: a single page with a header and a main panel that
# stacks explanatory text above one regression plot per predictor.
shinyUI(fluidPage(
headerPanel("Life Expectancy Regression"),
mainPanel(
# Narrative paragraphs summarising the regression results.
p("The following linear regression models represent the life expectancy in years of many of the world's countries. Any graph with a green line represents a predictor that has a positive regression coefficient, implying that the life expectancy will rise under the condition. Conversely, red lines represent predictors that cause a decrease in life expectancy."),
p("There is, however, an outlier linear regression model of under-five deaths. This is likely due to the small amount of countries with large values. The graphical representation of the model makes it clear that the countries on the left side do not consider the under-five death value highly, while the countries towards the right embody a negative correlation."),
p("The predictors with the strongest positive correlations and most stable relationships were the immunization predictors, including polio and diphtheria. They were joined by BMI, Schooling, Development Status, and GDP, which is indicative of global political conditions in countries less fortunate."),
# One plot slot per predictor; each ID must match an output rendered by
# the corresponding server function.
plotOutput("am"),
plotOutput("status"),
plotOutput("infant"),
plotOutput("alcohol"),
plotOutput("bmi"),
plotOutput("under5"),
plotOutput("polio"),
plotOutput("diphtheria"),
plotOutput("GDP"),
plotOutput("population"),
plotOutput("schooling")
))) | /ui.R | no_license | neonguyen2016/Computational-Statistics | R | false | false | 1,436 | r | shinyUI(fluidPage(
headerPanel("Life Expectancy Regression"),
mainPanel(
# Narrative paragraphs summarising the regression results.
p("The following linear regression models represent the life expectancy in years of many of the world's countries. Any graph with a green line represents a predictor that has a positive regression coefficient, implying that the life expectancy will rise under the condition. Conversely, red lines represent predictors that cause a decrease in life expectancy."),
p("There is, however, an outlier linear regression model of under-five deaths. This is likely due to the small amount of countries with large values. The graphical representation of the model makes it clear that the countries on the left side do not consider the under-five death value highly, while the countries towards the right embody a negative correlation."),
p("The predictors with the strongest positive correlations and most stable relationships were the immunization predictors, including polio and diphtheria. They were joined by BMI, Schooling, Development Status, and GDP, which is indicative of global political conditions in countries less fortunate."),
# One plot slot per predictor; each ID must match an output rendered by
# the corresponding server function.
plotOutput("am"),
plotOutput("status"),
plotOutput("infant"),
plotOutput("alcohol"),
plotOutput("bmi"),
plotOutput("under5"),
plotOutput("polio"),
plotOutput("diphtheria"),
plotOutput("GDP"),
plotOutput("population"),
plotOutput("schooling")
)))
library(kernlab)
library(quantmod)
library(plyr)
library(dplyr)
library(reshape2)
library(ggplot2)
options("getSymbols.warning4.0" = FALSE)
# Download daily XOM prices from Yahoo (from 2000 onward) and derive, for
# each observation, its calendar year and its trading-day index in that year.
from <- "2000-01-01"
# to <- "2011-12-31"
StockData <- getSymbols("XOM", src = "yahoo", from = from, auto.assign = getOption('getSymbols.auto.assign', FALSE))
Close.zoo <- StockData[, 6]   # column 6: adjusted close
Dates <- time(Close.zoo)
Years <- substr(Dates, 1, 4)  # year label of every trading day
Close <- data.frame(close = as.numeric(Close.zoo))
# Trading-day counter per year: 1..n(2000), 1..n(2001), ...
# sequence(table(Years)) produces exactly what the old loop built, without
# growing a vector element-by-element or using the unsafe 1:length(x) idiom.
countYears <- sequence(table(Years))
# One column per year (days aligned by within-year index); na.omit() trims
# all years to the length of the shortest one, then melt() returns long form.
Data_byYear <- data.frame(year = as.factor(Years), count = countYears, Close)
castData <- dcast(Data_byYear, count ~ year) %>%
  na.omit() %>%
  select(-count) %>%
  melt(value.name = "close", variable.name = "year")
# Rebuild the day counter now that every year has the same number of rows.
countYears <- NULL
for(i in 1:length(unique(Years))) {
  count <- 1:table(castData$year)[[i]]
  countYears[length(countYears)+1:length(count)] <- count
}
theData <- data.frame(castData, day = countYears)
# Standardise closes over the whole sample (z-scores across all years).
Close.std <- theData %>%
  mutate(close = (close - mean(close))/sd(close)) %>%
  group_by(day, year)
# Shift each year's series so it starts at 0 on day 1, making the yearly
# price paths directly comparable when overlaid.
norm.year <- NULL
for(i in 1:length(unique(Years))) {
  norm.year[i] <- filter(Close.std, year == unique(Years)[i] & day == 1)$close
}
for(j in 1:length(unique(Years))) {
  Close.std$close[which(Close.std$year == as.factor(unique(Years))[j])] <- Close.std$close[which(Close.std$year == as.factor(unique(Years))[j])] - norm.year[j]
}
# Overlay the normalised yearly price paths, one colour per year.
ggplot(Close.std, aes(x = day, y = close)) + geom_line(aes(colour = year))
#train <- select(Close.std, year, day, close) %>%
#  filter(!(year == 2011 & day >= 62))
train <- select(Close.std, year, day, close)
# Forecast grid: the next 100 trading days of the last observed year.
test <- data.frame(year = rep(train$year[length(train$year)], 100),
                   day = (train$day[length(train$day)]+1):(train$day[length(train$day)]+100))
#test <- filter(Close.std, year == 2011 & day >= 62) %>%
#  select(year, day, close)
# Gaussian-process regression on (year, day) -> normalised close (kernlab).
x.train <- cbind(as.integer(train$year), as.numeric(train$day))
y.train <- as.numeric(train$close)
x.test <- cbind(as.integer(test$year), as.numeric(test$day))
mod <- gausspr(x = x.train, y = y.train)
pred <- predict(mod, newdata = x.test, type = "response")
Predicted <- data.frame(year = test$year, day = test$day, close = pred)
# Plot the 2015 path with the GP forecast appended in blue.
plot_Dat <- train %>%
  filter(year == 2015) %>%
  ggplot(aes(x = day, y = close)) + geom_line() +
  geom_line(data = Predicted, colour = "blue")
#geom_line(data = test, colour = "red")
print(plot_Dat)
| /R_Files/Financial_Modelling/Oil & Gas/CurrentTrend.R | no_license | menquist/Michael_Enquist | R | false | false | 2,432 | r | library(kernlab)
library(quantmod)
library(plyr)
library(dplyr)
library(reshape2)
library(ggplot2)
options("getSymbols.warning4.0" = FALSE)
from <- "2000-01-01"
# to <- "2011-12-31"
StockData <- getSymbols("XOM", src = "yahoo", from = from, auto.assign = getOption('getSymbols.auto.assign', FALSE))
Close.zoo <- StockData[,6]
Dates <- time(Close.zoo)
Years <- substr(Dates, 1, 4)
Close <- data.frame(close = as.numeric(Close.zoo))
# Trading-day index within each year (1, 2, ... per year, concatenated in
# year order). sequence(table(Years)) yields exactly the old loop's output
# without growing a vector or using the unsafe 1:length(x) idiom.
countYears <- sequence(table(Years))
Data_byYear <- data.frame(year = as.factor(Years), count = countYears, Close)
castData <- dcast(Data_byYear, count ~ year) %>%
na.omit() %>%
select(-count) %>%
melt(value.name = "close", variable.name = "year")
countYears <- NULL
for(i in 1:length(unique(Years))) {
count <- 1:table(castData$year)[[i]]
countYears[length(countYears)+1:length(count)] <- count
}
theData <- data.frame(castData, day = countYears)
Close.std <- theData %>%
mutate(close = (close - mean(close))/sd(close)) %>%
group_by(day, year)
norm.year <- NULL
for(i in 1:length(unique(Years))) {
norm.year[i] <- filter(Close.std, year == unique(Years)[i] & day == 1)$close
}
for(j in 1:length(unique(Years))) {
Close.std$close[which(Close.std$year == as.factor(unique(Years))[j])] <- Close.std$close[which(Close.std$year == as.factor(unique(Years))[j])] - norm.year[j]
}
ggplot(Close.std, aes(x = day, y = close)) + geom_line(aes(colour = year))
#train <- select(Close.std, year, day, close) %>%
# filter(!(year == 2011 & day >= 62))
train <- select(Close.std, year, day, close)
test <- data.frame(year = rep(train$year[length(train$year)], 100),
day = (train$day[length(train$day)]+1):(train$day[length(train$day)]+100))
#test <- filter(Close.std, year == 2011 & day >= 62) %>%
# select(year, day, close)
x.train <- cbind(as.integer(train$year), as.numeric(train$day))
y.train <- as.numeric(train$close)
x.test <- cbind(as.integer(test$year), as.numeric(test$day))
mod <- gausspr(x = x.train, y = y.train)
pred <- predict(mod, newdata = x.test, type = "response")
Predicted <- data.frame(year = test$year, day = test$day, close = pred)
plot_Dat <- train %>%
filter(year == 2015) %>%
ggplot(aes(x = day, y = close)) + geom_line() +
geom_line(data = Predicted, colour = "blue")
#geom_line(data = test, colour = "red")
print(plot_Dat)
|
#
# Copyright 2007-2016 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# Author: Michael D. Hunter
# Date: 2014.12.17
# Filename: ModelIdentification.R
# Purpose: Check the model identification checking function.
#------------------------------------------------------------------------------
#--------------------------------------------------------------------
# Load OpenMx
require(OpenMx)
#--------------------------------------------------------------------
# Read in and set up the data
# Column indices of the manifest exogenous / endogenous indicators.
IndManExo <- 1:8
IndManEnd <- 9:12
# The data (example data set shipped with OpenMx)
data(latentMultipleRegExample1)
# Rearrange columns to separate exogenous and endogenous variables
rawlisdat <- latentMultipleRegExample1[, c(IndManEnd, IndManExo)]
rawlisy <- latentMultipleRegExample1[, IndManEnd]
rawlisx <- latentMultipleRegExample1[, IndManExo]
# Take covariance and means
covlisdat <- cov(rawlisdat)
mealisdat <- colMeans(rawlisdat)
# Number of manifest and latent exogenous and endogenous variables
numLatExo <- 2
numLatEnd <- 1
numManExo <- 8
numManEnd <- 4
# Dimnames: xi* = latent exogenous, eta* = latent endogenous; manifest
# names are taken from the reordered data frame.
LatExo <- paste('xi', 1:numLatExo, sep='')
LatEnd <- paste('eta', 1:numLatEnd, sep='')
ManExo <- names(rawlisdat)[(numManEnd+1):(numManEnd+numManExo)]
ManEnd <- names(rawlisdat)[1:numManEnd]
#--------------------------------------------------------------------
# Specify the 13 extended LISREL matrices
# ---------------------------------------------------------------------
# The extended LISREL matrices.  Logical literals are written TRUE/FALSE
# instead of T/F: T and F are ordinary variables that can be reassigned,
# so spelling them out avoids a classic R correctness hazard.
# ---------------------------------------------------------------------
# LX: loadings of the 8 manifest exogenous indicators on xi1/xi2.
lx <- mxMatrix("Full", numManExo, numLatExo,
    free=c(TRUE,TRUE,TRUE,TRUE, FALSE,FALSE,FALSE,FALSE,
           FALSE,FALSE,FALSE,FALSE, FALSE,TRUE,TRUE,TRUE),
    values=c(1, .2, .2, .2, 0, 0, 0, 0, 0, 0, 0, 0, 1, .2, .2, .2),
    labels=c( paste('l', 1, 1:4, sep=''), rep(NA, 8), paste('l', 2, 5:8, sep='')),
    name='LX',
    dimnames=list(ManExo, LatExo)
) #DONE
# LY: loadings of the 4 manifest endogenous indicators on eta1
# (first loading fixed at 1 to set the latent scale).
ly <- mxMatrix("Full", numManEnd, numLatEnd,
    free=c(FALSE,TRUE,TRUE,TRUE),
    values=c(1, .2, .2, .2),
    labels= paste('l', 3, 9:12, sep=''),
    name='LY',
    dimnames=list(ManEnd, LatEnd)
) #DONE
# BE: regressions among endogenous latents (none in this model).
be <- mxMatrix("Zero", numLatEnd, numLatEnd, name='BE', dimnames=list(LatEnd, LatEnd)) #DONE
# GA: regressions of eta1 on xi1/xi2.
ga <- mxMatrix("Full", numLatEnd, numLatExo,
    free=TRUE,
    values=.2,
    labels=c('b13', 'b23'),
    name='GA',
    dimnames=list(LatEnd, LatExo)
) #DONE
# PH: covariance matrix of the exogenous latents.
ph <- mxMatrix("Symm", numLatExo, numLatExo,
    free=c(TRUE,TRUE,TRUE),
    values=c(.8, .3, .8),
    labels=c('varF1', 'covF1F2', 'varF2'),
    name='PH',
    dimnames=list(LatExo, LatExo)
) #DONE
# PS: residual variance of the endogenous latent.
ps <- mxMatrix("Symm", numLatEnd, numLatEnd,
    free=TRUE,
    values=.8,
    labels='varF3',
    name='PS',
    dimnames=list(LatEnd, LatEnd)
) #DONE
# TD / TE: residual variances of exogenous / endogenous manifests.
td <- mxMatrix("Diag", numManExo, numManExo,
    free=TRUE,
    values=.8,
    labels=paste('d', 1:8, sep=''),
    name='TD',
    dimnames=list(ManExo, ManExo)
) #DONE
te <- mxMatrix("Diag", numManEnd, numManEnd,
    free=TRUE,
    values=.8,
    labels=paste('e', 9:12, sep=''),
    name='TE',
    dimnames=list(ManEnd, ManEnd)
) #DONE
# TH: residual covariances between exogenous and endogenous manifests (none).
th <- mxMatrix("Zero", numManExo, numManEnd, name='TH', dimnames=list(ManExo, ManEnd)) #DONE
# TX / TY: manifest intercepts (means model).
tx <- mxMatrix("Full", numManExo, 1,
    free=TRUE,
    values=.1,
    labels=paste('m', 1:8, sep=''),
    name='TX',
    dimnames=list(ManExo, "TXMeans")
) #DONE
ty <- mxMatrix("Full", numManEnd, 1,
    free=TRUE,
    values=.1,
    labels=paste('m', 9:12, sep=''),
    name='TY',
    dimnames=list(ManEnd, "TYMeans")
) #DONE
# KA / AL: latent means, fixed at zero.
ka <- mxMatrix("Zero", numLatExo, 1, name='KA', dimnames=list(LatExo, "KAMeans")) #DONE
al <- mxMatrix("Zero", numLatEnd, 1, name='AL', dimnames=list(LatEnd, "ALMeans")) #DONE
#--------------------------------------------------------------------
#--------------------------------------------------------------------
# Define the model
# Exogenous-only LISREL model with means: fits LX, PH, TD, TX, KA to the
# raw exogenous indicators.
xmod <- mxModel(
	name='LISREL Exogenous Model with Means',
	mxData(observed=rawlisx, type='raw'),
	lx, ph, td, tx, ka,
	mxExpectationLISREL(
		LX=lx$name,
		PH=ph$name,
		TD=td$name,
		TX=tx$name,
		KA=ka$name
	),
	mxFitFunctionML()
)
#--------------------------------------------------------------------
#--------------------------------------------------------------------
# Model identification
# The model is expected to be under-identified; the checks below pin the
# exact set of non-identified parameters reported by OpenMx.
id <- mxCheckIdentification(xmod)
omxCheckEquals(id$status, FALSE)
omxCheckEquals(id$non_identified_parameters, c("l11", "l12", "l13", "l14", "varF1", "covF1F2"))
#--------------------------------------------------------------------
#--------------------------------------------------------------------
# Add constraint, now I don't know what to do
# mxCheckIdentification() does not support models containing MxConstraint
# objects; confirm it raises the documented error verbatim.
xmod2 <- mxModel(xmod, mxConstraint(TX[1,1] == TX[2,1], name='conman'))
omxCheckError(mxCheckIdentification(xmod2), "Whoa Nelly. I found an MxConstraint in your model. I just cannot work under these conditions. I will be in my trailer until you reparameterize your model without using mxConstraint().")
| /inst/models/nightly/ModelIdentification.R | no_license | Ewan-Keith/OpenMx | R | false | false | 5,077 | r | #
# Copyright 2007-2016 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# Author: Michael D. Hunter
# Date: 2014.12.17
# Filename: ModelIdentification.R
# Purpose: Check the model identification checking function.
#------------------------------------------------------------------------------
#--------------------------------------------------------------------
# Load OpenMx
require(OpenMx)
#--------------------------------------------------------------------
# Read in and set up the data
IndManExo <- 1:8
IndManEnd <- 9:12
# The data
data(latentMultipleRegExample1)
# Rearange Columns to separate exogenous and endogenous variables
rawlisdat <- latentMultipleRegExample1[, c(IndManEnd, IndManExo)]
rawlisy <- latentMultipleRegExample1[, IndManEnd]
rawlisx <- latentMultipleRegExample1[, IndManExo]
# Take covariance and means
covlisdat <- cov(rawlisdat)
mealisdat <- colMeans(rawlisdat)
# Number of manifest and latent exogenous and endogenous variables
numLatExo <- 2
numLatEnd <- 1
numManExo <- 8
numManEnd <- 4
# Dimnames
LatExo <- paste('xi', 1:numLatExo, sep='')
LatEnd <- paste('eta', 1:numLatEnd, sep='')
ManExo <- names(rawlisdat)[(numManEnd+1):(numManEnd+numManExo)]
ManEnd <- names(rawlisdat)[1:numManEnd]
#--------------------------------------------------------------------
# Specify the 13 extended LISREL matrices
# ---------------------------------------------------------------------
# The extended LISREL matrices.  Logical literals are written TRUE/FALSE
# instead of T/F: T and F are ordinary variables that can be reassigned,
# so spelling them out avoids a classic R correctness hazard.
# ---------------------------------------------------------------------
# LX: loadings of the 8 manifest exogenous indicators on xi1/xi2.
lx <- mxMatrix("Full", numManExo, numLatExo,
    free=c(TRUE,TRUE,TRUE,TRUE, FALSE,FALSE,FALSE,FALSE,
           FALSE,FALSE,FALSE,FALSE, FALSE,TRUE,TRUE,TRUE),
    values=c(1, .2, .2, .2, 0, 0, 0, 0, 0, 0, 0, 0, 1, .2, .2, .2),
    labels=c( paste('l', 1, 1:4, sep=''), rep(NA, 8), paste('l', 2, 5:8, sep='')),
    name='LX',
    dimnames=list(ManExo, LatExo)
) #DONE
# LY: loadings of the 4 manifest endogenous indicators on eta1
# (first loading fixed at 1 to set the latent scale).
ly <- mxMatrix("Full", numManEnd, numLatEnd,
    free=c(FALSE,TRUE,TRUE,TRUE),
    values=c(1, .2, .2, .2),
    labels= paste('l', 3, 9:12, sep=''),
    name='LY',
    dimnames=list(ManEnd, LatEnd)
) #DONE
# BE: regressions among endogenous latents (none in this model).
be <- mxMatrix("Zero", numLatEnd, numLatEnd, name='BE', dimnames=list(LatEnd, LatEnd)) #DONE
# GA: regressions of eta1 on xi1/xi2.
ga <- mxMatrix("Full", numLatEnd, numLatExo,
    free=TRUE,
    values=.2,
    labels=c('b13', 'b23'),
    name='GA',
    dimnames=list(LatEnd, LatExo)
) #DONE
# PH: covariance matrix of the exogenous latents.
ph <- mxMatrix("Symm", numLatExo, numLatExo,
    free=c(TRUE,TRUE,TRUE),
    values=c(.8, .3, .8),
    labels=c('varF1', 'covF1F2', 'varF2'),
    name='PH',
    dimnames=list(LatExo, LatExo)
) #DONE
# PS: residual variance of the endogenous latent.
ps <- mxMatrix("Symm", numLatEnd, numLatEnd,
    free=TRUE,
    values=.8,
    labels='varF3',
    name='PS',
    dimnames=list(LatEnd, LatEnd)
) #DONE
# TD / TE: residual variances of exogenous / endogenous manifests.
td <- mxMatrix("Diag", numManExo, numManExo,
    free=TRUE,
    values=.8,
    labels=paste('d', 1:8, sep=''),
    name='TD',
    dimnames=list(ManExo, ManExo)
) #DONE
te <- mxMatrix("Diag", numManEnd, numManEnd,
    free=TRUE,
    values=.8,
    labels=paste('e', 9:12, sep=''),
    name='TE',
    dimnames=list(ManEnd, ManEnd)
) #DONE
# TH: residual covariances between exogenous and endogenous manifests (none).
th <- mxMatrix("Zero", numManExo, numManEnd, name='TH', dimnames=list(ManExo, ManEnd)) #DONE
# TX / TY: manifest intercepts (means model).
tx <- mxMatrix("Full", numManExo, 1,
    free=TRUE,
    values=.1,
    labels=paste('m', 1:8, sep=''),
    name='TX',
    dimnames=list(ManExo, "TXMeans")
) #DONE
ty <- mxMatrix("Full", numManEnd, 1,
    free=TRUE,
    values=.1,
    labels=paste('m', 9:12, sep=''),
    name='TY',
    dimnames=list(ManEnd, "TYMeans")
) #DONE
# KA / AL: latent means, fixed at zero.
ka <- mxMatrix("Zero", numLatExo, 1, name='KA', dimnames=list(LatExo, "KAMeans")) #DONE
al <- mxMatrix("Zero", numLatEnd, 1, name='AL', dimnames=list(LatEnd, "ALMeans")) #DONE
#--------------------------------------------------------------------
#--------------------------------------------------------------------
# Define the model
xmod <- mxModel(
name='LISREL Exogenous Model with Means',
mxData(observed=rawlisx, type='raw'),
lx, ph, td, tx, ka,
mxExpectationLISREL(
LX=lx$name,
PH=ph$name,
TD=td$name,
TX=tx$name,
KA=ka$name
),
mxFitFunctionML()
)
#--------------------------------------------------------------------
#--------------------------------------------------------------------
# Model identification
id <- mxCheckIdentification(xmod)
omxCheckEquals(id$status, FALSE)
omxCheckEquals(id$non_identified_parameters, c("l11", "l12", "l13", "l14", "varF1", "covF1F2"))
#--------------------------------------------------------------------
#--------------------------------------------------------------------
# Add constraint, now I don't know what to do
xmod2 <- mxModel(xmod, mxConstraint(TX[1,1] == TX[2,1], name='conman'))
omxCheckError(mxCheckIdentification(xmod2), "Whoa Nelly. I found an MxConstraint in your model. I just cannot work under these conditions. I will be in my trailer until you reparameterize your model without using mxConstraint().")
|
#' Prankster replacement for \code{base::sys.on.exit}
#'
#' @description With probability 0.5 returns a random Polish rap line;
#' otherwise delegates to \code{base::sys.on.exit()}.  Part of a joke
#' package that shadows base functions.
#'
#' @param params Ignored; accepted (with a default) only so existing
#'   callers keep working.  \code{base::sys.on.exit()} takes no arguments,
#'   so nothing is forwarded -- forwarding it, as the original code did,
#'   raised an "unused argument" error.
#'
#' @return A length-one character vector (a rap line) or the value of
#'   \code{base::sys.on.exit()}.
#'
#' @export
sys.on.exit <- function(params = NULL){
  rap <- c("Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
           "Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
           "Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
           "Przy piwerku boski chillout Gruba toczy sie rozkmina",
           "Wez ziomalku sie nie spinaj DJ Werset znow zabija")
  rapek <- sample(rap, 1)
  if (runif(1) < 0.5) {
    rapek
  } else {
    # NOTE: sys.on.exit() is frame-sensitive; called here it reports this
    # wrapper's own (empty) on.exit expression, i.e. NULL.
    base::sys.on.exit()
  }
}
| /R/sys.on.exit.R | no_license | granatb/RapeR | R | false | false | 677 | r |
#' @title fun_name
#'
#' @description kolejna funkcja podmieniona
#'
#' @param param fun_name
#'
#'
#'
#' @export
sys.on.exit<- function(params){
rap <- c("Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
"Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
"Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
"Przy piwerku boski chillout Gruba toczy sie rozkmina",
"Wez ziomalku sie nie spinaj DJ Werset znow zabija")
rapek <- sample(rap, 1)
if(runif(1,0,1) < 0.5){
rapek
}else{base::sys.on.exit(params)
}
}
|
## libraries
library("matrixcalc")
library("caTools")
library("randomForest")
## load data
#nba = read.csv("scores_team_00213.csv") # team box scores 2013-14
# concatenate with box scores from 2014-15
#nba = read.csv("scores_team_00214.csv") # team box scores 2014-15
#nba = read.csv("scorelines_00213.csv") # team box scores 2013-14 + betting lines
#nba = read.csv("scorelines_00214.csv") # team box scores 2013-14 + betting lines
# Combine both seasons of team box scores + betting lines into one frame.
nba = read.csv("scorelines_00213.csv") # team box scores 2013-14 + betting lines
nba0 = read.csv("scorelines_00214.csv") # team box scores 2014-15 + betting lines
nba = rbind(nba, nba0)
## add data fields
# add game number into season
# Game number within the season for each team (1..n in file order).
# seq_along() is safe when a factor level has zero matching rows, where
# 1:length(ind) would wrongly yield c(1, 0); `<-` replaces `=` assignment.
nba$gamenum <- 0
for (n in levels(nba$tm)) {
  ind <- which(nba$tm == n)
  nba$gamenum[ind] <- seq_along(ind)
}
# set opponent stats in each game:
ngames <- nrow(nba)
# Rows come in consecutive pairs (2g-1, 2g) for the two teams of game g.
# `partner` maps each row to the other row of its game, so every opponent
# column is filled with one vectorized assignment instead of a scalar loop.
# (For an unpaired trailing row, partner indexes past the end and yields NA,
# exactly as the original loop left it.)
partner <- seq_len(ngames) + rep(c(1, -1), length.out = ngames)
nba$opts <- nba$pts[partner]
nba$ofgm <- nba$fgm[partner]
nba$ofga <- nba$fga[partner]
nba$o3pm <- nba$X3pm[partner]
nba$o3pa <- nba$X3pa[partner]
nba$oftm <- nba$ftm[partner]
nba$ofta <- nba$fta[partner]
nba$oto <- nba$to[partner]
nba$otot <- nba$tot[partner]
# add shooting percentages
# Shooting percentages (own and opponent), expressed as 0-100.
nba$fgpct=nba$fgm/nba$fga * 100
nba$X3ppct=nba$X3pm/nba$X3pa * 100
nba$ftpct=nba$ftm/nba$fta * 100
nba$ofgpct=nba$ofgm/nba$ofga * 100
nba$o3ppct=nba$o3pm/nba$o3pa * 100
nba$oftpct=nba$oftm/nba$ofta * 100
# add wins
nba$win = nba$pts > nba$opts
# compare to lines
## assume nba$spread has the spread
#nba$winspread = (nba$pts - nba$opts) > nba$spread
# add home status
nba$ishome = (nba$tm == nba$home)
# add profit from a $1 moneyline bet
# profit in positive spread = spread/100 - 1
# for negative spread = -100/spread- 1
# NOTE(review): the expression below evaluates to line/100 - 1 for positive
# lines and to 100/abs(line) for negative lines; that does not match the
# comment above for negative lines, and the standard moneyline profit for a
# positive line is line/100 (no "- 1").  Confirm the intended payout rule.
nba$profit = exp(sign(nba$line)*(log(abs(nba$line)) - log(100))) - as.numeric((nba$line>0))
nba$profit[which(nba$win==FALSE)] = -1
### make lagged variables
nlags = 2
# Drop each team's first `nlags` games: they have no complete lag history.
nba0 = subset(nba, nba$gamenum>nlags)
teams = levels(nba$tm)
# Base predictors; lagged column names are appended to this vector below.
vars = c("win", "line", "ishome")
#lagvars = c("fgpct")
#lagvars = c("fgpct", "ofgpct")
lagvars = c("fgpct", "ofgpct", "tot", "otot")
# NOTE: There are multiple ways to get lagged "opponent FG%"
# e.g. we could take the prev opponents of the team of interest
# or the upcoming opponent's past three game's...
# Right now the former is impelemented. Can we switch to the latter?
#
#lagvars = c("fgpct", "ofgpct", "to", "oto", "tot", "otot")
#lagvars = c("fgpct", "ofgpct", "ftpct", "oftpct", "X3ppct", "o3ppct", "win", "to", "oto", "tot", "otot")
# for each lagged covariate, loop over lags
# and initialize empty columns (named e.g. "fgpct1", "fgpct2", ...)
for (v in lagvars){
  for (n in 1:nlags){
    vr = paste(v, as.character(n), sep="")
    nba0[,vr] = 0
    vars = c(vars, vr)
  }
}
# for each team, get indices of games,
# loop over lags/lagged covars & fill empty columns
for (t in teams) {
  ind0 = which(nba0$tm==t)
  ngames = length(which(nba$tm==t))
  for (n in 1:nlags) {
    # Rows of the full data shifted back by n games, aligned with nba0 rows.
    ind1 = which(nba$tm==t & nba$gamenum>=(nlags+1-n) & nba$gamenum<=(ngames-n))
    for (v in lagvars) {
      vr = paste(v, as.character(n), sep="")
      nba0[ind0,vr] = nba[ind1,v]
    }
  }
}
### break into train/test data
library("caTools")
nbaSub = nba0
# 70/30 split, stratified on the win outcome.
split = sample.split(nbaSub$win, SplitRatio=0.7)
nbaTrain = subset(nbaSub, split==TRUE)
nbaTest = subset(nbaSub, split==FALSE)
#nbaTest = nba0
### estimate model & predict test game outcomes
# logistic regression
nbaMod = glm(win ~ ., data=nbaTrain[,vars], family="binomial")
nbaPredict = predict(nbaMod, newdata=nbaTest[,vars], type="response")
nbaTest$pred = nbaPredict
summary(nbaMod)
# Classify "win" only above a high predicted probability (bet selectively).
thresh = 0.8
cm = table(nbaTest$win, nbaPredict > thresh) # if thresholding needed to classify, e.g. log regression
#cm = table(nbaTest$win, nbaPredict) # if classifications are given, e.g. decision tree
print(cm) # confusion matrix
#cm # confusion matrix
cm[2,2] / (cm[1,2]+cm[2,2]) # % of predicted wins that are correct
matrix.trace(cm) / sum(cm) # accuracy
summary(nbaMod)
# Games the model would bet on, with realised profit per $1 staked.
bets = which(nbaPredict>thresh) # if threshold needed
#bets = which(nbaPredict==TRUE) # for classification
nbaTest[bets,c("gameid", "tm", "win", "pred", "profit")]
| /scripts/R/nba_lines.R | no_license | felixkoeth/nba | R | false | false | 4,522 | r | ## libraries
library("matrixcalc")
library("caTools")
library("randomForest")
## load data
#nba = read.csv("scores_team_00213.csv") # team box scores 2013-14
# concatenate with box scores from 2014-15
#nba = read.csv("scores_team_00214.csv") # team box scores 2014-15
#nba = read.csv("scorelines_00213.csv") # team box scores 2013-14 + betting lines
#nba = read.csv("scorelines_00214.csv") # team box scores 2013-14 + betting lines
nba = read.csv("scorelines_00213.csv") # team box scores 2013-14 + betting lines
nba0 = read.csv("scorelines_00214.csv") # team box scores 2013-14 + betting lines
nba = rbind(nba, nba0)
## add data fields
# add game number into season
# Game number within the season for each team (1..n in file order).
# seq_along() is safe when a factor level has zero matching rows, where
# 1:length(ind) would wrongly yield c(1, 0); `<-` replaces `=` assignment.
nba$gamenum <- 0
for (n in levels(nba$tm)) {
  ind <- which(nba$tm == n)
  nba$gamenum[ind] <- seq_along(ind)
}
# set opponent stats in each game:
ngames <- nrow(nba)
# Rows come in consecutive pairs (2g-1, 2g) for the two teams of game g.
# `partner` maps each row to the other row of its game, so every opponent
# column is filled with one vectorized assignment instead of a scalar loop.
# (For an unpaired trailing row, partner indexes past the end and yields NA,
# exactly as the original loop left it.)
partner <- seq_len(ngames) + rep(c(1, -1), length.out = ngames)
nba$opts <- nba$pts[partner]
nba$ofgm <- nba$fgm[partner]
nba$ofga <- nba$fga[partner]
nba$o3pm <- nba$X3pm[partner]
nba$o3pa <- nba$X3pa[partner]
nba$oftm <- nba$ftm[partner]
nba$ofta <- nba$fta[partner]
nba$oto <- nba$to[partner]
nba$otot <- nba$tot[partner]
# add shooting percentages
nba$fgpct=nba$fgm/nba$fga * 100
nba$X3ppct=nba$X3pm/nba$X3pa * 100
nba$ftpct=nba$ftm/nba$fta * 100
nba$ofgpct=nba$ofgm/nba$ofga * 100
nba$o3ppct=nba$o3pm/nba$o3pa * 100
nba$oftpct=nba$oftm/nba$ofta * 100
# add wins
nba$win = nba$pts > nba$opts
# compare to lines
## assume nba$spread has the spread
#nba$winspread = (nba$pts - nba$opts) > nba$spread
# add home status
nba$ishome = (nba$tm == nba$home)
# add profit from a $1 moneyline bet
# profit in positive spread = spread/100 - 1
# for negative spread = -100/spread- 1
nba$profit = exp(sign(nba$line)*(log(abs(nba$line)) - log(100))) - as.numeric((nba$line>0))
nba$profit[which(nba$win==FALSE)] = -1
### make lagged variables
nlags = 2
nba0 = subset(nba, nba$gamenum>nlags)
teams = levels(nba$tm)
vars = c("win", "line", "ishome")
#lagvars = c("fgpct")
#lagvars = c("fgpct", "ofgpct")
lagvars = c("fgpct", "ofgpct", "tot", "otot")
# NOTE: There are multiple ways to get lagged "opponent FG%"
# e.g. we could take the prev opponents of the team of interest
# or the upcoming opponent's past three game's...
# Right now the former is impelemented. Can we switch to the latter?
#
#lagvars = c("fgpct", "ofgpct", "to", "oto", "tot", "otot")
#lagvars = c("fgpct", "ofgpct", "ftpct", "oftpct", "X3ppct", "o3ppct", "win", "to", "oto", "tot", "otot")
# for each lagged covariate, loop over lags
# and initialize empty columns
for (v in lagvars){
for (n in 1:nlags){
vr = paste(v, as.character(n), sep="")
nba0[,vr] = 0
vars = c(vars, vr)
}
}
# for each team, get indices of games,
# loop over lags/lagged covars & fill empty columns
for (t in teams) {
ind0 = which(nba0$tm==t)
ngames = length(which(nba$tm==t))
for (n in 1:nlags) {
ind1 = which(nba$tm==t & nba$gamenum>=(nlags+1-n) & nba$gamenum<=(ngames-n))
for (v in lagvars) {
vr = paste(v, as.character(n), sep="")
nba0[ind0,vr] = nba[ind1,v]
}
}
}
### break into train/test data
library("caTools")
nbaSub = nba0
split = sample.split(nbaSub$win, SplitRatio=0.7)
nbaTrain = subset(nbaSub, split==TRUE)
nbaTest = subset(nbaSub, split==FALSE)
#nbaTest = nba0
### estimate model & predict test game outcomes
# logistic regression
nbaMod = glm(win ~ ., data=nbaTrain[,vars], family="binomial")
nbaPredict = predict(nbaMod, newdata=nbaTest[,vars], type="response")
nbaTest$pred = nbaPredict
summary(nbaMod)
thresh = 0.8
cm = table(nbaTest$win, nbaPredict > thresh) # if thresholding needed to classify, e.g. log regression
#cm = table(nbaTest$win, nbaPredict) # if classifications are given, e.g. decision tree
print(cm) # confusion matrix
#cm # confusion matrix
cm[2,2] / (cm[1,2]+cm[2,2]) # % of predicted wins that are correct
matrix.trace(cm) / sum(cm) # accuracy
summary(nbaMod)
bets = which(nbaPredict>thresh) # if threshold needed
#bets = which(nbaPredict==TRUE) # for classification
nbaTest[bets,c("gameid", "tm", "win", "pred", "profit")]
|
#load libraries
library(rvest)
library(XML)
library(magrittr)
# Scrape the first 20 review pages for the Jio 4G data card on Snapdeal.
surl <- "https://www.snapdeal.com/product/reliance-4g-black-data-cards/618575312815/reviews?page"
pages <- vector("list", 20)   # preallocate instead of growing with c()
for (i in seq_len(20)) {
  murl <- read_html(paste(surl, i, sep = "="))
  pages[[i]] <- murl %>% html_nodes("#defaultReviewsCard p") %>% html_text()
}
snapdeal_reviews <- unlist(pages)
# BUG FIX: the original wrote the undefined object `amazon_reviews`
# (copy-paste leftover from an Amazon script), which errors at runtime;
# write the scraped Snapdeal reviews instead.
write.table(snapdeal_reviews, "TextMining-Reviews-Jio4GDataCard.txt", row.names = FALSE)
getwd()
#### Text Mining - Web Scraping ####
# Use the scraped reviews as the working text vector.
txt <- snapdeal_reviews
str(txt)
length(txt)
View(txt)
# install.packages("tm")
library(tm)
# Convert the character data to corpus type
x <- Corpus(VectorSource(txt))
inspect(x[1])
# Force valid UTF-8; un-mappable bytes are escaped rather than dropped.
x <- tm_map(x, function(x) iconv(enc2utf8(x), sub='byte'))
# Data Cleansing
x1 <- tm_map(x, tolower)
inspect(x1[1])
x1 <- tm_map(x1, removePunctuation)
inspect(x1[1])
x1 <- tm_map(x1, removeNumbers)
inspect(x1[1])
x1 <- tm_map(x1, removeWords, stopwords('english'))
inspect(x1[1])
# striping white spaces
x1 <- tm_map(x1, stripWhitespace)
inspect(x1[1])
# Term document matrix
# converting unstructured data to structured format using TDM
tdm <- TermDocumentMatrix(x1)
tdm
dtm <- t(tdm) # transpose
# NOTE(review): the line below immediately overwrites the transposed dtm
# above; one of the two assignments is redundant.
dtm <- DocumentTermMatrix(x1)
# To remove sparse entries upon a specific value
# NOTE(review): corpus.dtm.frequent is never used later in this script.
corpus.dtm.frequent <- removeSparseTerms(tdm, 0.99)
tdm <- as.matrix(tdm)
dim(tdm)
#top 20 rows and columns
tdm[1:20, 1:20]
inspect(x[1])
# Bar plot
# Word frequencies across all documents; plot only words seen >= 5 times.
w <- rowSums(tdm)
w
w_sub <- subset(w, w >= 5)
w_sub
barplot(w_sub, las=2, col = rainbow(30))
# Term phone repeats maximum number of times
# Drop dominant, uninformative terms and rebuild the TDM.
x1 <- tm_map(x1, removeWords, c('snapdeal','delivery','jio','thank'))
x1 <- tm_map(x1, stripWhitespace)
tdm <- TermDocumentMatrix(x1)
tdm
tdm <- as.matrix(tdm)
tdm[100:109, 1:20]
# Bar plot after removal of the term 'phone'
w <- rowSums(tdm)
w
w_sub <- subset(w, w >= 5)
w_sub
barplot(w_sub, las=2, col = rainbow(30))
##### Word cloud #####
library(wordcloud)
wordcloud(words = names(w_sub), freq = w_sub)
w_sub1 <- sort(rowSums(tdm), decreasing = TRUE)
head(w_sub1)
wordcloud(words = names(w_sub1), freq = w_sub1) # all words are considered
#better visualization
wordcloud(words = names(w_sub1), freq = w_sub1, random.order=F, colors=rainbow(30), scale = c(2,0.5), rot.per = 0.4)
#### Bigram ####
library(RWeka)
library(wordcloud)
# Bigram cloud: tokenize into 2-word phrases, keep those seen >= 2 times.
minfreq_bigram <- 2
bitoken <- NGramTokenizer(x1, Weka_control(min = 2, max = 2))
two_word <- data.frame(table(bitoken))
sort_two <- two_word[order(two_word$Freq, decreasing = TRUE), ]
wordcloud(sort_two$bitoken, sort_two$Freq, random.order = F, scale = c(2, 0.35), min.freq = minfreq_bigram, colors = brewer.pal(8, "Dark2"), max.words = 150)
#trigram
# Trigram cloud: 3-word phrases seen >= 3 times.
minfreq_trigram <- 3
tritoken <- NGramTokenizer(x1, Weka_control(min = 3, max = 3))
three_word <- data.frame(table(tritoken))
sort_three <- three_word[order(three_word$Freq, decreasing = TRUE), ]
wordcloud(sort_three$tritoken, sort_three$Freq, random.order = F, scale = c(3, 0.35), min.freq = minfreq_trigram, colors = brewer.pal(8, "Dark2"), max.words = 150)
#Sentiment Analysis
# Loading Positive and Negative words
# Opinion lexicons are chosen interactively via file.choose():
# positive-words.txt, negative-words.txt, then a stop-word list.
pos.words <- readLines(file.choose()) # read-in positive-words.txt
neg.words <- readLines(file.choose()) # read-in negative-words.txt
stopwdrds <- readLines(file.choose())
### Positive word cloud ###
# Keep only frequency entries whose term appears in the positive lexicon.
pos.matches <- match(names(w_sub1), pos.words)
pos.matches <- !is.na(pos.matches)
freq_pos <- w_sub1[pos.matches]
names <- names(freq_pos)
windows()
wordcloud(names, freq_pos, scale=c(4,1), colors = brewer.pal(8,"Dark2"))
### Matching Negative words ###
neg.matches <- match(names(w_sub1), neg.words)
neg.matches <- !is.na(neg.matches)
freq_neg <- w_sub1[neg.matches]
names <- names(freq_neg)
windows()
wordcloud(names, freq_neg, scale=c(4,.5), colors = brewer.pal(8, "Dark2")) | /Text Mining NLP.R | no_license | farazhariyani/Text-Mining-NLP | R | false | false | 3,784 | r | #load libraries
library(rvest)
library(XML)
library(magrittr)
# Scrape the first 20 review pages for the Jio 4G data card on Snapdeal.
surl <- "https://www.snapdeal.com/product/reliance-4g-black-data-cards/618575312815/reviews?page"
pages <- vector("list", 20)   # preallocate instead of growing with c()
for (i in seq_len(20)) {
  murl <- read_html(paste(surl, i, sep = "="))
  pages[[i]] <- murl %>% html_nodes("#defaultReviewsCard p") %>% html_text()
}
snapdeal_reviews <- unlist(pages)
# BUG FIX: the original wrote the undefined object `amazon_reviews`
# (copy-paste leftover from an Amazon script), which errors at runtime;
# write the scraped Snapdeal reviews instead.
write.table(snapdeal_reviews, "TextMining-Reviews-Jio4GDataCard.txt", row.names = FALSE)
getwd()
#### Text Mining - Web Scraping ####
txt <- snapdeal_reviews
str(txt)
length(txt)
View(txt)
# install.packages("tm")
library(tm)
# Convert the character data to corpus type
x <- Corpus(VectorSource(txt))
inspect(x[1])
x <- tm_map(x, function(x) iconv(enc2utf8(x), sub='byte'))
# Data Cleansing
x1 <- tm_map(x, tolower)
inspect(x1[1])
x1 <- tm_map(x1, removePunctuation)
inspect(x1[1])
x1 <- tm_map(x1, removeNumbers)
inspect(x1[1])
x1 <- tm_map(x1, removeWords, stopwords('english'))
inspect(x1[1])
# striping white spaces
##### Corpus cleanup (continued) #####
# 'x1' is a tm corpus built earlier in this file (not visible in this chunk).
# Collapse runs of whitespace left over from earlier removal steps.
x1 <- tm_map(x1, stripWhitespace)
inspect(x1[1])
# Term document matrix
# converting unstructured data to structured format using TDM
tdm <- TermDocumentMatrix(x1)
tdm
# NOTE(review): 'dtm' is assigned twice; the transpose on the first line is
# immediately overwritten by DocumentTermMatrix() -- confirm which was intended.
dtm <- t(tdm) # transpose
dtm <- DocumentTermMatrix(x1)
# To remove sparse entries upon a specific value
# NOTE(review): the sparse-reduced matrix below is never used afterwards; the
# dense conversion on the next line starts from the full 'tdm'.
corpus.dtm.frequent <- removeSparseTerms(tdm, 0.99)
tdm <- as.matrix(tdm)
dim(tdm)
#top 20 rows and columns
tdm[1:20, 1:20]
# NOTE(review): 'x' (not 'x1') -- presumably the raw corpus from earlier in the
# file; confirm this is not a typo for x1.
inspect(x[1])
# Bar plot
# Row sums of the TDM give per-term frequencies across all documents.
w <- rowSums(tdm)
w
# Keep only terms appearing at least 5 times.
w_sub <- subset(w, w >= 5)
w_sub
barplot(w_sub, las=2, col = rainbow(30))
# Term phone repeats maximum number of times
# Drop dominant / uninformative terms, then rebuild the TDM.
x1 <- tm_map(x1, removeWords, c('snapdeal','delivery','jio','thank'))
x1 <- tm_map(x1, stripWhitespace)
tdm <- TermDocumentMatrix(x1)
tdm
tdm <- as.matrix(tdm)
tdm[100:109, 1:20]
# Bar plot after removal of the term 'phone'
w <- rowSums(tdm)
w
w_sub <- subset(w, w >= 5)
w_sub
barplot(w_sub, las=2, col = rainbow(30))
##### Word cloud #####
library(wordcloud)
wordcloud(words = names(w_sub), freq = w_sub)
# All term frequencies (no minimum-count filter), sorted for the full cloud.
w_sub1 <- sort(rowSums(tdm), decreasing = TRUE)
head(w_sub1)
wordcloud(words = names(w_sub1), freq = w_sub1) # all words are considered
#better visualization
wordcloud(words = names(w_sub1), freq = w_sub1, random.order=F, colors=rainbow(30), scale = c(2,0.5), rot.per = 0.4)
#### Bigram ####
library(RWeka)
library(wordcloud)
minfreq_bigram <- 2
# NGramTokenizer (RWeka): min = max = 2 extracts consecutive word pairs.
bitoken <- NGramTokenizer(x1, Weka_control(min = 2, max = 2))
two_word <- data.frame(table(bitoken))
sort_two <- two_word[order(two_word$Freq, decreasing = TRUE), ]
wordcloud(sort_two$bitoken, sort_two$Freq, random.order = F, scale = c(2, 0.35), min.freq = minfreq_bigram, colors = brewer.pal(8, "Dark2"), max.words = 150)
#trigram
minfreq_trigram <- 3
tritoken <- NGramTokenizer(x1, Weka_control(min = 3, max = 3))
three_word <- data.frame(table(tritoken))
sort_three <- three_word[order(three_word$Freq, decreasing = TRUE), ]
wordcloud(sort_three$tritoken, sort_three$Freq, random.order = F, scale = c(3, 0.35), min.freq = minfreq_trigram, colors = brewer.pal(8, "Dark2"), max.words = 150)
#Sentiment Analysis
# Loading Positive and Negative words
# file.choose() opens an interactive picker: these lexicon files must be
# supplied by the user at run time (script is not batch-safe).
pos.words <- readLines(file.choose()) # read-in positive-words.txt
neg.words <- readLines(file.choose()) # read-in negative-words.txt
# NOTE(review): 'stopwdrds' is read but never used in the visible code.
stopwdrds <- readLines(file.choose())
### Positive word cloud ###
# Keep only terms that appear in the positive-word lexicon.
pos.matches <- match(names(w_sub1), pos.words)
pos.matches <- !is.na(pos.matches)
freq_pos <- w_sub1[pos.matches]
# NOTE(review): this shadows base::names() in the workspace; harmless here but
# easy to trip over later.
names <- names(freq_pos)
windows()
wordcloud(names, freq_pos, scale=c(4,1), colors = brewer.pal(8,"Dark2"))
### Matching Negative words ###
neg.matches <- match(names(w_sub1), neg.words)
neg.matches <- !is.na(neg.matches)
freq_neg <- w_sub1[neg.matches]
names <- names(freq_neg)
windows()
wordcloud(names, freq_neg, scale=c(4,.5), colors = brewer.pal(8, "Dark2")) |
#' Build cvfit model and evaluate prediction accuracy
#'
#' @description Train a LASSO model or an LDA model using 50% of cells from a subpopulation.
#' @param cluster_select_indx_S1 Training-cell indices; changes in each bootstrap from the random-sample command.
#' @return A list containing: predict_clusters, predictor_S2, sub_cvfit_out, dat_DE_fm_DE, cvfit, predictor_S1, fit.lda
#' @keywords Training model
#' @author QN
#' @examples
#' # To be added with a toy dataset
#' \dontrun{
#' predict_marker(cluster_select_indx_S1 = NULL)
#' }
#' @export
#'
#'
# NOTE(review): Lit_New_Lasso is called here but is only defined further down
# this file; sourcing the file top-to-bottom fails with "could not find
# function" unless the function already exists in the workspace -- confirm the
# intended order.
predict_marker<- Lit_New_Lasso(cluster_select_indx_S1 = NULL)
# Hard-coded subpopulation identifiers; the function body below also
# hard-codes the strings "Subpop1"/"Subpop2" rather than reading these.
c_selectID="Subpop1"
c_compareID="Subpop2"
# Train a LASSO (glmnet::cv.glmnet) and an LDA (caret::train) classifier that
# separate two cell subpopulations, then estimate prediction accuracy on the
# held-out half of the cells.
#
# NOTE(review): besides its three parameters, this function reads several
# objects from the calling environment: subpop1, cluster_compare,
# cluster_select, DE_idx, ori_dat and dayID must all exist, and glmnet
# (cv.glmnet), caret (train/trainControl) and dplyr must be loaded.
# The parameter SubPop1 is never referenced in the body -- TODO confirm.
Lit_New_Lasso <-function(cluster_select_indx_S1=NULL, SubPop1, SubPop2) {
#taking a subsampling size of 50% of the cluster_select out for training
# NOTE(review): 'subpop1' (lower case) does not match the parameter 'SubPop1';
# it resolves to a global variable if one exists, otherwise this errors.
subsampling =round(ncol(subpop1)/2)
#check if the sizes are balanced. e.g. subpop1 is more than 2 times bigger than subpop2
#e.g. there is a very big cluster present in the dataset C/2 = subsampling >length(total-C=cluster_compare)
if (ncol(SubPop2) > subsampling) {
# draw a comparison sample the same size as the training half (no replacement)
cluster_compare_indx_S1 <- sample(cluster_compare, subsampling , replace=F)
} else {
cluster_compare_indx_S1 <- cluster_compare
}
M_or_DE_idx=DE_idx
#prepare predictor matrix containing both clutering classes
predictor_S1 <-ori_dat[M_or_DE_idx, c(cluster_select_indx_S1 , cluster_compare_indx_S1)]
#generate categorical response
#set all values to cluster select (character type)
y_cat = rep("Subpop1",length(predictor_S1[1,]))
#replace values for cluster compare
#first get a new matrix
ori_compare <-ori_dat[,cluster_compare]
#get indexes for cells in predictor_S1 belong to cluster_compare class
Sub_clustercompare_Indx_S1 <-which(colnames(predictor_S1) %in% colnames(ori_compare))
#change value of the cluster id
y_cat[Sub_clustercompare_Indx_S1] <-rep("Subpop2", length(Sub_clustercompare_Indx_S1))
#fitting with cross validation to find the best LASSO model
# rows = cells, columns = genes after transposition
cvfit = cv.glmnet(t(predictor_S1), y_cat, family = "binomial", type.measure = "class")
#fitting with lda, also with cross validation
dataset <- t(predictor_S1) #(note predictor_S1 =t(gene_S1))
dataset <- as.data.frame(dataset)
# drop all-zero and duplicated-name columns before LDA
Zero_col<-which(colSums(dataset)==0)
duplicated_col <-which(duplicated(colnames(dataset))==TRUE)
if(length(c(Zero_col, duplicated_col)) !=0){dataset <-dataset[,-c(Zero_col, duplicated_col)]}
dataset$Cluster_class <- as.character(y_cat)
# caret: 10-fold cross validation repeated 3 times, scored on accuracy
trainControl <- trainControl(method="repeatedcv", number=10, repeats=3)
metric <- "Accuracy"
fit.lda <- train(Cluster_class ~., data=dataset, method="lda", metric=metric, trControl=trainControl, na.action=na.omit)
#to extract coefficient Beta for a gene for an optimized lambda value
cvfit_out <-as.matrix(coef(cvfit, s = cvfit$lambda.min))
cvfit_out <-as.data.frame(cvfit_out)
#find number of genes with coefficient different to 0
cvfit_out$name <-row.names(cvfit_out)
# the single coefficient column produced by coef() is named "1"
sub_cvfit_out <-cvfit_out[cvfit_out$`1` != 0,]
#extract deviance explained
# print() both displays and returns the glmnet path table
t_DE <- as.matrix(print(cvfit$glmnet.fit))
dat_DE <-as.data.frame(t_DE)
colnames(dat_DE) <-c('Dfd', 'Deviance', 'lambda')
#to get the coordinate for lambda that produces minimum error
# NOTE(review): 'digit=' relies on R partial argument matching for 'digits='.
dat_DE_Lambda_idx <- which(round(dat_DE$lambda,digit=3) == round(cvfit$lambda.min,digits=3))
dat_DE <-dat_DE[1:dat_DE_Lambda_idx[1],]
require(dplyr)
# keep the maximum deviance explained per degrees-of-freedom step
dat_DE %>% group_by(Dfd) %>% summarise(Deviance = max(Deviance)) -> dat_DE_fm_DE
dat_DE_fm_DE <-as.data.frame(dat_DE_fm_DE)
# NOTE(review): 'dayID' comes from the calling environment -- confirm it exists.
dat_DE_fm_DE$DEgenes <-paste0('DEgenes_C',"SubPop1",'_day_', dayID)
remaining <-c('remaining', 1, 'DEgenes')
dat_DE_fm_DE <-rbind(dat_DE_fm_DE, remaining)
#preparing for validation test to estimate accuracy
#keep all cells except for those used in the training set
cluster_select_indx_S2 <- cluster_select[-cluster_select_indx_S1]
#check the subsampling for the target population
if (length(cluster_compare[-cluster_compare_indx_S1]) > subsampling) {
cluster_compare_indx_S2 <- sample(cluster_compare[-cluster_compare_indx_S1], subsampling, replace=F )
} else {
cluster_compare_indx_S2 <- cluster_compare #keep everything in the predicted dat
}
genes_S2 <-ori_dat[M_or_DE_idx, c(cluster_select_indx_S2 , cluster_compare_indx_S2)]
predictor_S2 <-t(genes_S2)
#start prediction for estimating accuracy
predict_clusters<-predict(cvfit, newx = predictor_S2, type = "class", s = cvfit$lambda.min)
#to return the output as 5 lists
# (the list actually has 7 elements; see the roxygen @return)
return(list(predict_clusters, predictor_S2, sub_cvfit_out, dat_DE_fm_DE, cvfit, predictor_S1,fit.lda ))
}
| /R-raw/OneSampleTraining.R | no_license | quanaibn/scGPS | R | false | false | 4,589 | r | #' build cvfit model and evaluate prediction accuracy
#'
#' @description Train a LASSO model or a LDA model using 50% cells from a subpopulation
#' @param cluster_select_indx_S1 changes in each of the bootstrap from the dandom sample command the training
#' @return a list containing: predict_clusters, predictor_S2, sub_cvfit_out, dat_DE_fm_DE, cvfit, predictor_S1,fit.lda
#' @keywords Training model
#' @author QN
#' @Example
#' To be added with a toy dataset
#' @export
#' \dontrun{
#' predict_marker(cluster_select_indx_S1 = NULL)
#' }
#'
#'
predict_marker<- Lit_New_Lasso(cluster_select_indx_S1 = NULL)
c_selectID="Subpop1"
c_compareID="Subpop2"
Lit_New_Lasso <-function(cluster_select_indx_S1=NULL, SubPop1, SubPop2) {
#taking a subsampling size of 50% of the cluster_select out for training
subsampling =round(ncol(subpop1)/2)
#check if the sizes are balanced. e.g. subpop1 is more than 2 times bigger than subpop2
#e.g. there is a very big cluster present in the dataset C/2 = subsampling >length(total-C=cluster_compare)
if (ncol(SubPop2) > subsampling) {
cluster_compare_indx_S1 <- sample(cluster_compare, subsampling , replace=F)
} else {
cluster_compare_indx_S1 <- cluster_compare
}
M_or_DE_idx=DE_idx
#prepare predictor matrix containing both clutering classes
predictor_S1 <-ori_dat[M_or_DE_idx, c(cluster_select_indx_S1 , cluster_compare_indx_S1)]
#generate categorical response
#set all values to cluster select (character type)
y_cat = rep("Subpop1",length(predictor_S1[1,]))
#replace values for cluster compare
#first get a new matrix
ori_compare <-ori_dat[,cluster_compare]
#get indexes for cells in predictor_S1 belong to cluster_compare class
Sub_clustercompare_Indx_S1 <-which(colnames(predictor_S1) %in% colnames(ori_compare))
#change value of the cluster id
y_cat[Sub_clustercompare_Indx_S1] <-rep("Subpop2", length(Sub_clustercompare_Indx_S1))
#fitting with cross validation to find the best LASSO model
cvfit = cv.glmnet(t(predictor_S1), y_cat, family = "binomial", type.measure = "class")
#fitting with lda, also with cross validation
dataset <- t(predictor_S1) #(note predictor_S1 =t(gene_S1))
dataset <- as.data.frame(dataset)
Zero_col<-which(colSums(dataset)==0)
duplicated_col <-which(duplicated(colnames(dataset))==TRUE)
if(length(c(Zero_col, duplicated_col)) !=0){dataset <-dataset[,-c(Zero_col, duplicated_col)]}
dataset$Cluster_class <- as.character(y_cat)
trainControl <- trainControl(method="repeatedcv", number=10, repeats=3)
metric <- "Accuracy"
fit.lda <- train(Cluster_class ~., data=dataset, method="lda", metric=metric, trControl=trainControl, na.action=na.omit)
#to extract coefficient Beta for a gene for an optimized lambda value
cvfit_out <-as.matrix(coef(cvfit, s = cvfit$lambda.min))
cvfit_out <-as.data.frame(cvfit_out)
#find number of genes with coefficient different to 0
cvfit_out$name <-row.names(cvfit_out)
sub_cvfit_out <-cvfit_out[cvfit_out$`1` != 0,]
#extract deviance explained
t_DE <- as.matrix(print(cvfit$glmnet.fit))
dat_DE <-as.data.frame(t_DE)
colnames(dat_DE) <-c('Dfd', 'Deviance', 'lambda')
#to get the coordinate for lambda that produces minimum error
dat_DE_Lambda_idx <- which(round(dat_DE$lambda,digit=3) == round(cvfit$lambda.min,digits=3))
dat_DE <-dat_DE[1:dat_DE_Lambda_idx[1],]
require(dplyr)
dat_DE %>% group_by(Dfd) %>% summarise(Deviance = max(Deviance)) -> dat_DE_fm_DE
dat_DE_fm_DE <-as.data.frame(dat_DE_fm_DE)
dat_DE_fm_DE$DEgenes <-paste0('DEgenes_C',"SubPop1",'_day_', dayID)
remaining <-c('remaining', 1, 'DEgenes')
dat_DE_fm_DE <-rbind(dat_DE_fm_DE, remaining)
#preparing for validation test to estimate accuracy
#keep all cells except for those used in the training set
cluster_select_indx_S2 <- cluster_select[-cluster_select_indx_S1]
#check the subsampling for the target population
if (length(cluster_compare[-cluster_compare_indx_S1]) > subsampling) {
cluster_compare_indx_S2 <- sample(cluster_compare[-cluster_compare_indx_S1], subsampling, replace=F )
} else {
cluster_compare_indx_S2 <- cluster_compare #keep everything in the predicted dat
}
genes_S2 <-ori_dat[M_or_DE_idx, c(cluster_select_indx_S2 , cluster_compare_indx_S2)]
predictor_S2 <-t(genes_S2)
#start prediction for estimating accuracy
predict_clusters<-predict(cvfit, newx = predictor_S2, type = "class", s = cvfit$lambda.min)
#to return the output as 5 lists
return(list(predict_clusters, predictor_S2, sub_cvfit_out, dat_DE_fm_DE, cvfit, predictor_S1,fit.lda ))
}
|
# Shiny UI: four sliders collect flower measurements (presumably the iris
# dataset -- confirm against server.R); the server predicts the species and
# renders it into output$answer.
library(shiny)
shinyUI(fluidPage(
  titlePanel("Predict Species from the below"),
  sidebarLayout(
    sidebarPanel(
      sliderInput("sliderSL", "What is the Sepal Length?", 4, 8, value = 5.1), #input of sepal.length for prediction.
      sliderInput("sliderSW", "What is the Sepal Width?", 2, 5, value = 3.5), #input of sepal.width for prediction.
      sliderInput("sliderPL", "What is the Petal Length?", 1, 7, value = 1.4), #input of petal.length for prediction.
      sliderInput("sliderPW", "What is the Petal Width?", 0.1, 3, value = 0.2), #input of petal.width for prediction.
      # submitButton delays reactivity until the user clicks Submit
      submitButton("Submit")
    ),
    mainPanel(
      h3("Predicted Species"),
      # Fixed: a trailing comma after this argument left an empty argument in
      # the mainPanel() call, which errors ("argument is empty") at evaluation.
      textOutput("answer")
    )
  )
)
) | /ProjectApp/ui.R | no_license | vwburnett/Shiny-App | R | false | false | 809 | r | library(shiny)
shinyUI(fluidPage(
titlePanel("Predict Species from the below"),
sidebarLayout(
sidebarPanel(
sliderInput("sliderSL", "What is the Sepal Length?", 4, 8, value = 5.1), #input of sepal.length for prediction.
sliderInput("sliderSW", "What is the Sepal Width?", 2, 5, value = 3.5), #input of sepal.width for prediction.
sliderInput("sliderPL", "What is the Petal Length?", 1, 7, value = 1.4), #input of petal.length for prediction.
sliderInput("sliderPW", "What is the Petal Width?", 0.1, 3, value = 0.2), #input of petal.width for prediction.
submitButton("Submit")
),
mainPanel(
h3("Predicted Species"),
textOutput("answer"),
)
)
)
) |
# Package-level imports. The '@' prefix is required for roxygen2 to emit
# importFrom() directives into NAMESPACE; without it these lines are inert
# text and %>% / the dplyr verbs would not be imported.
#' @importFrom magrittr "%>%"
#' @importFrom dplyr mutate n filter left_join select
#' @importFrom tidyr pivot_longer
NULL
# Compute the center coordinates and display labels for the hypothesis
# ellipses of a multiplicity graph.
#
# alphaHypotheses  alpha levels / weights shown inside each ellipse
# digits           rounding for the displayed alpha values
# txt              hypothesis names (one per ellipse)
# fill             grouping variable, coerced to factor for coloring
# xradius,yradius  radii of the layout ellipse the centers sit on
# radianStart      angle of the first center; defaults depend on parity of n
# x,y              explicit center coordinates (both must be given to be used)
# wchar            character printed before '=' in each label
# Returns a data frame with columns x, y, txt, fill.
ellipseCenters <- function(alphaHypotheses, digits=5, txt = letters[1:3], fill=1, xradius = 2, yradius = 2, radianStart = NULL,
                           x=NULL, y=NULL, wchar='x'){
  n_hyp <- length(txt)
  coords_supplied <- !is.null(x) && !is.null(y)
  if (coords_supplied) {
    # Caller-provided layout: just validate the lengths.
    if (length(x) != n_hyp) stop("length of x must match # hypotheses")
    if (length(y) != n_hyp) stop("length of y must match # hypotheses")
  } else {
    # Default layout: centers equally spaced clockwise on an ellipse.
    if (is.null(radianStart)) {
      radianStart <- if (n_hyp %% 2 != 0) {
        pi * (1 / 2 + 1 / n_hyp)
      } else {
        pi * (1 + 2 / n_hyp) / 2
      }
    }
    if (!is.numeric(radianStart)) stop("radianStart must be numeric")
    if (length(radianStart) != 1) stop("radianStart should be a single numeric value")
    angles <- (radianStart - (seq_len(n_hyp) - 1) / n_hyp * 2 * pi) %% (2 * pi)
    x <- xradius * cos(angles)
    y <- yradius * sin(angles)
  }
  # Label of the form "<name>\n<wchar>=<rounded alpha>"
  node_labels <- paste0(txt, '\n', wchar, '=', round(alphaHypotheses, digits))
  data.frame(x, y, txt = node_labels, fill = as.factor(fill))
}
# Build the four anchor points per hypothesis that stat_ellipse() later turns
# into a drawn ellipse of approximately the requested radii.
#
# x                data frame of ellipse centers (columns x and y, one row per node)
# xradius,yradius  desired half-width / half-height of each drawn ellipse
# Returns the stacked anchor points with a group index 'n' and blank 'txt'.
makeEllipseData <- function(x,xradius=.5,yradius=.5){
  # Empirical shrink factor so the fitted ellipse matches the target radii.
  half_w <- xradius / 3.1
  half_h <- yradius / 3.1
  # Group index ties the four anchors of each node together.
  x$n <- seq_len(nrow(x))
  top <- x
  top$y <- top$y + half_h
  bottom <- x
  bottom$y <- bottom$y - half_h
  right <- x
  right$x <- right$x + half_w
  left <- x
  left$x <- left$x - half_w
  # Same stacking order as before: above, below, right, left of each center.
  ellipses <- rbind(top, bottom, right, left)
  # Anchor points carry no label of their own.
  ellipses$txt <- ""
  ellipses
}
# Build the arrow segments (and the little weight-label boxes on them) that
# connect hypothesis ellipses according to the transition matrix 'm'.
#
# x                data frame of ellipse centers (columns x, y), one row per hypothesis
# m                square transition matrix; entry [i, j] is the weight passed i -> j
# xradius,yradius  ellipse radii used to clip each segment at the ellipse edge
# offset           rotational offset (radians) so i->j and j->i arrows do not overlap
# trdigits         digits displayed for transition weights
# trprop           proportion along the segment where the weight box sits
# trhw, trhh       half-width / half-height of the weight box
# Returns one row per non-zero transition with segment endpoints (x, y, xend,
# yend), box center/bounds (xb, yb, xbmin..ybmax) and the label text 'txt'.
makeTransitionSegments <- function(x, m, xradius, yradius, offset, trdigits, trprop, trhw, trhh){
  # Create dataset records from transition matrix
  md <- data.frame(m)
  names(md) <- 1:nrow(m)
  md <- md %>%
    dplyr::mutate(from=1:dplyr::n()) %>%
    # put transition weight in w (long format: one row per from/to pair)
    tidyr::pivot_longer(-from, names_to="to", values_to="w") %>%
    dplyr::mutate(to=as.integer(to)) %>%
    # keep only transitions that actually propagate weight
    dplyr::filter(w > 0)
  # Get ellipse centers for transitions
  y <- x %>% dplyr::select(x, y) %>% dplyr::mutate(from = 1:dplyr::n())
  return(
    md %>% dplyr::left_join(y, by = "from") %>%
      dplyr::left_join(y %>% dplyr::transmute(to = from, xend = x, yend = y), by = "to") %>%
      # Use ellipse centers, radii and offset to create points for line segments.
      # theta is the angle from source to target, scaled by the radii so the
      # segment meets the ellipse boundary rather than its center.
      dplyr::mutate(theta=atan2((yend - y) * xradius, (xend - x) * yradius),
                    x1 = x, x1end = xend, y1 = y, y1end = yend,
                    x = x1 + xradius * cos(theta + offset),
                    y = y1 + yradius * sin(theta + offset),
                    xend = x1end + xradius * cos(theta + pi - offset),
                    yend = y1end + yradius * sin(theta + pi - offset),
                    # weight box placed trprop of the way along the segment
                    xb = x + (xend - x) * trprop,
                    yb = y + (yend - y) * trprop,
                    xbmin = xb - trhw,
                    xbmax = xb + trhw,
                    ybmin = yb - trhh,
                    ybmax = yb + trhh,
                    txt = as.character(round(w,trdigits))
      ) %>%
      dplyr::select(c(from, to, w, x, y, xend, yend, xb, yb, xbmin, xbmax, ybmin, ybmax, txt))
  )
}
# Validate the user-facing arguments of hGraph(). Only the arguments actually
# checked here are forced (R's lazy evaluation leaves the rest untouched).
# Previously the radius checks ran through testthat::test_that(), which is a
# test-time tool, adds a hard runtime dependency on testthat, and rejected
# integer-valued radii (expect_type(..., "double")); plain stop() checks are
# used instead, accepting any single positive numeric value.
checkHGArgs <- function(nHypotheses, nameHypotheses, alphaHypotheses, m, fill,
                        palette, labels, legend, legend.name, legend.Position, halfwid, halfhgt,
                        trhw, trhh, trprop, digits, trdigits, size, boxtextsize,
                        arrowsize, radianStart, offset, xradius, yradius, x, y, wchar)
{
  if (!is.character(nameHypotheses)) stop("Hypotheses should be in a vector of character strings")
  ntxt <- length(nameHypotheses)
  # Each radius must be a single positive number
  checkRadius <- function(value, label) {
    if (!is.numeric(value) || length(value) != 1 || is.na(value) || value <= 0) {
      stop(label, " must be a single positive number", call. = FALSE)
    }
  }
  checkRadius(xradius, "xradius")
  checkRadius(yradius, "yradius")
  # fill is recycled across hypotheses, so it must be scalar or match ntxt
  if (length(fill) != 1 && length(fill) != ntxt) stop("fill must have length 1 or number of hypotheses")
  invisible(NULL)
}
#' @title Create multiplicity graph using ggplot2
#' @description \code{hGraph()} plots a multiplicity graph defined by user inputs.
#' The graph can also be used with the ***gMCP*** package to evaluate a set of nominal p-values for the tests of the hypotheses in the graph
#' @param nHypotheses number of hypotheses in graph
#' @param nameHypotheses hypothesis names
#' @param alphaHypotheses alpha-levels or weights for ellipses
#' @param m square transition matrix of dimension `nHypotheses`
#' @param fill grouping variable for hypotheses
#' @param palette colors for groups
#' @param labels text labels for groups
#' @param legend.name text for legend header
#' @param legend.position text string or x,y coordinates for legend
#' @param halfWid half width of ellipses
#' @param halfHgt half height of ellipses
#' @param trhw transition box width
#' @param trhh transition box height
#' @param trprop proportion of transition arrow length where transition box is placed
#' @param digits number of digits to show for alphaHypotheses
#' @param trdigits digits displayed for transition weights
#' @param size text size in ellipses
#' @param boxtextsize transition text size
#' @param arrowsize size of arrowhead for transition arrows
#' @param radianStart radians from origin for first ellipse; nodes spaced equally in clockwise order with centers on an ellipse by default
#' @param offset rotational offset in radians for transition weight arrows
#' @param xradius horizontal ellipse diameter on which ellipses are drawn
#' @param yradius vertical ellipse diameter on which ellipses are drawn
#' @param x x coordinates for hypothesis ellipses if elliptical arrangement is not wanted
#' @param y y coordinates for hypothesis ellipses if elliptical arrangement is not wanted
#' @param wchar character for alphaHypotheses in ellipses
#' @return A `ggplot` object with a multi-layer multiplicity graph
#' @examples
#' library(tidyr)
#' # Defaults: note clockwise ordering
#' hGraph(5)
#' # Add colors (default is 3 gray shades)
#' hGraph(3,fill=1:3)
#' # Colorblind palette
#' cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73",
#'                "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
#' hGraph(6,fill=as.factor(1:6),palette=cbPalette)
#' # Use a hue palette
#' hGraph(4,fill=factor(1:4),palette=scales::hue_pal(l=75)(4))
#' # different alpha allocation, hypothesis names and transitions
#' alphaHypotheses <- c(.005,.007,.013)
#' nameHypotheses <- c("ORR","PFS","OS")
#' m <- matrix(c(0,1,0,
#'               0,0,1,
#'               1,0,0),nrow=3,byrow=TRUE)
#' hGraph(3,alphaHypotheses=alphaHypotheses,nameHypotheses=nameHypotheses,m=m)
#' # Custom position and size of ellipses, change text to multi-line text
#' # Adjust box width
#' # add legend in middle of plot
#' hGraph(3,x=sqrt(0:2),y=c(1,3,1.5),size=6,halfWid=.3,halfHgt=.3, trhw=0.6,
#'        palette=cbPalette[2:4], fill = c(1, 2, 2),
#'        legend.position = c(.6,.5), legend.name = "Legend:", labels = c("Group 1", "Group 2"),
#'        nameHypotheses=c("H1:\n Long name","H2:\n Longer name","H3:\n Longest name"))
#' @details
#' See vignette **Multiplicity graphs formatting using ggplot2** for explanation of formatting.
#' @importFrom grDevices gray.colors
#' @importFrom ggplot2 aes ggplot guide_legend stat_ellipse theme theme_void geom_text geom_segment geom_rect scale_fill_manual
#' @importFrom grid unit
#' @rdname hGraph
#' @export
hGraph <- function(
  nHypotheses = 4,
  nameHypotheses = paste("H", (1:nHypotheses), sep = ""),
  alphaHypotheses = 0.025/nHypotheses,
  m = matrix(array(1/(nHypotheses - 1), nHypotheses^2),
             nrow = nHypotheses) - diag(1/(nHypotheses - 1), nHypotheses),
  fill = 1,
  palette = grDevices::gray.colors(length(unique(fill)), start = .5, end = .8),
  labels = LETTERS[1:length(unique(fill))],
  legend.name = " ",
  legend.position = "none",
  halfWid = 0.5,
  halfHgt = 0.5,
  trhw = 0.1,
  trhh = 0.075,
  trprop = 1/3,
  digits = 5,
  trdigits = 2,
  size = 6,
  boxtextsize = 4,
  arrowsize = 0.02,
  radianStart = if((nHypotheses)%%2 != 0) {
    pi * (1/2 + 1/nHypotheses) } else {
      pi * (1 + 2/nHypotheses)/2 },
  offset = pi/4/nHypotheses,
  xradius = 2,
  yradius = xradius,
  x = NULL,
  y = NULL,
  wchar = if(as.character(Sys.info()[1])=="Windows"){'\u03b1'}else{'w'}
){
  # Check inputs. Arguments are matched positionally to checkHGArgs(); lazy
  # evaluation means arguments it never touches are not forced.
  # Fixed: halfWid/halfHgt were previously passed as the undefined names
  # 'halfwid'/'halfhgt', which would error with "object not found" as soon as
  # checkHGArgs evaluated them.
  # NOTE(review): 'legend' has no local binding here and resolves to
  # graphics::legend; checkHGArgs never uses it -- confirm intent.
  checkHGArgs(nHypotheses, nameHypotheses, alphaHypotheses, m, fill,
              palette, labels, legend, legend.name, legend.position, halfWid, halfHgt,
              trhw, trhh, trprop, digits, trdigits, size, boxtextsize,
              arrowsize, radianStart, offset, xradius, yradius, x, y, wchar)
  # Hypothesis ellipse centers plus their "name\n<wchar>=alpha" labels
  hData <- ellipseCenters(alphaHypotheses,
                          digits,
                          nameHypotheses,
                          fill = fill,
                          xradius = xradius,
                          yradius = yradius,
                          radianStart = radianStart,
                          x = x,
                          y = y,
                          wchar = wchar)
  # Four anchor points per hypothesis from which stat_ellipse draws each node
  ellipseData <- hData %>% makeEllipseData(xradius = halfWid, yradius = halfHgt)
  # Arrow segments and weight boxes for every non-zero transition in m
  transitionSegments <- hData %>%
    makeTransitionSegments(m, xradius = halfWid, yradius = halfHgt, offset = offset,
                           trprop = trprop, trdigits = trdigits, trhw = trhw, trhh = trhh)
  # Layer the plot
  ggplot()+
    # plot ellipses
    stat_ellipse(data=ellipseData,
                 aes(x=x, y=y, group=n, fill=as.factor(fill)),
                 geom="polygon") +
    theme_void() +
    #following should be needed
    # scale_alpha(guide="none") +
    scale_fill_manual(values=palette,
                      labels=labels,
                      guide_legend(legend.name)) +
    theme(legend.position = legend.position) +
    # Add hypothesis name + alpha text inside each ellipse
    geom_text(data=hData,aes(x=x,y=y,label=txt),size=size) +
    # Add transition arrows
    geom_segment(data = transitionSegments,
                 aes(x=x, y=y, xend=xend, yend=yend),
                 arrow = grid::arrow(length = grid::unit(arrowsize, "npc"))) +
    # Add transition boxes
    geom_rect(data = transitionSegments,
              aes(xmin = xbmin, xmax = xbmax, ymin = ybmin, ymax = ybmax),
              fill="white",color="black") +
    # Add transition weight text centered in each box
    geom_text(data = transitionSegments, aes(x = xb, y = yb, label=txt), size = boxtextsize)
}
| /R/hgraph.r | no_license | danielwoodie/gsDesign | R | false | false | 10,561 | r | #' importFrom magrittr "%>%"
#' importFrom dplyr mutate n filter left_join select
#' importFrom tidyr pivot_longer
NULL
ellipseCenters <- function(alphaHypotheses, digits=5, txt = letters[1:3], fill=1, xradius = 2, yradius = 2, radianStart = NULL,
x=NULL, y=NULL, wchar='x'){
ntxt <- length(txt)
if (!is.null(x) && !is.null(y)){
if (length(x)!=ntxt) stop("length of x must match # hypotheses")
if (length(y)!=ntxt) stop("length of y must match # hypotheses")
}else{
if (is.null(radianStart)) radianStart <- if((ntxt)%%2!=0){pi*(1/2+1/ntxt)}else{
pi * (1 + 2 / ntxt) / 2}
if (!is.numeric(radianStart)) stop("radianStart must be numeric")
if (length(radianStart) != 1) stop("radianStart should be a single numeric value")
# compute middle of each rectangle
radian <- (radianStart - (0:(ntxt-1))/ntxt*2*pi) %% (2*pi)
x <- xradius * cos(radian)
y <- yradius * sin(radian)
}
# create data frame with middle (x and y) of ellipses, txt, fill
return(data.frame(x,y,
txt=paste(txt,'\n',wchar,'=',round(alphaHypotheses,digits),sep=""),
fill=as.factor(fill))
)
}
makeEllipseData <- function(x,xradius=.5,yradius=.5){
# hack to get ellipses around x,y with radii xradius and yradius
w <- xradius/3.1
h <- yradius/3.1
x$n <- 1:nrow(x)
ellipses <- rbind(x %>% dplyr::mutate(y=y+h),
x %>% dplyr::mutate(y=y-h),
x %>% dplyr::mutate(x=x+w),
x %>% dplyr::mutate(x=x-w)
)
ellipses$txt=""
return(ellipses)
}
makeTransitionSegments <- function(x, m, xradius, yradius, offset, trdigits, trprop, trhw, trhh){
# Create dataset records from transition matrix
md <- data.frame(m)
names(md) <- 1:nrow(m)
md <- md %>%
dplyr::mutate(from=1:dplyr::n()) %>%
# put transition weight in w
tidyr::pivot_longer(-from, names_to="to", values_to="w") %>%
dplyr::mutate(to=as.integer(to)) %>%
dplyr::filter(w > 0)
# Get ellipse center centers for transitions
y <- x %>% dplyr::select(x, y) %>% dplyr::mutate(from = 1:dplyr::n())
return(
md %>% dplyr::left_join(y, by = "from") %>%
dplyr::left_join(y %>% dplyr::transmute(to = from, xend = x, yend = y), by = "to") %>%
# Use ellipse centers, radii and offset to create points for line segments.
dplyr::mutate(theta=atan2((yend - y) * xradius, (xend - x) * yradius),
x1 = x, x1end = xend, y1 = y, y1end = yend,
x = x1 + xradius * cos(theta + offset),
y = y1 + yradius * sin(theta + offset),
xend = x1end + xradius * cos(theta + pi - offset),
yend = y1end + yradius * sin(theta + pi - offset),
xb = x + (xend - x) * trprop,
yb = y + (yend - y) * trprop,
xbmin = xb - trhw,
xbmax = xb + trhw,
ybmin = yb - trhh,
ybmax = yb + trhh,
txt = as.character(round(w,trdigits))
) %>%
dplyr::select(c(from, to, w, x, y, xend, yend, xb, yb, xbmin, xbmax, ybmin, ybmax, txt))
)
}
checkHGArgs <- function(nHypotheses, nameHypotheses, alphaHypotheses, m, fill,
palette, labels, legend, legend.name, legend.Position, halfwid, halfhgt,
trhw, trhh, trprop, digits, trdigits, size, boxtextsize,
arrowsize, radianStart, offset, xradius, yradius, x, y, wchar)
{ if (!is.character(nameHypotheses)) stop("Hypotheses should be in a vector of character strings")
ntxt <- length(nameHypotheses)
testthat::test_that("Each radius should be a single positive number",{
testthat::expect_type(xradius, "double")
testthat::expect_type(yradius, "double")
testthat::expect_equal(length(xradius),1)
testthat:: expect_equal(length(yradius),1)
testthat::expect_gt(xradius, 0)
testthat::expect_gt(yradius, 0)
})
# length of fill should be same as ntxt
if(length(fill) != 1 & length(fill) != ntxt) stop("fill must have length 1 or number of hypotheses")
}
#' @title Create multiplicity graph using ggplot2
#' @description \code{hGraph()} plots a multiplicity graph defined by user inputs.
#' The graph can also be used with the ***gMCP*** package to evaluate a set of nominal p-values for the tests of the hypotheses in the graph
#' @param nHypotheses number of hypotheses in graph
#' @param nameHypotheses hypothesis names
#' @param alphaHypotheses alpha-levels or weights for ellipses
#' @param m square transition matrix of dimension `nHypotheses`
#' @param fill grouping variable for hypotheses
#' @param palette colors for groups
#' @param labels text labels for groups
#' @param legend.name text for legend header
#' @param legend.position text string or x,y coordinates for legend
#' @param halfWid half width of ellipses
#' @param halfHgt half height of ellipses
#' @param trhw transition box width
#' @param trhh transition box height
#' @param trprop proportion of transition arrow length where transition box is placed
#' @param digits number of digits to show for alphaHypotheses
#' @param trdigits digits displayed for transition weights
#' @param size text size in ellipses
#' @param boxtextsize transition text size
#' @param arrowsize size of arrowhead for transition arrows
#' @param radianStart radians from origin for first ellipse; nodes spaced equally in clockwise order with centers on an ellipse by default
#' @param offset rotational offset in radians for transition weight arrows
#' @param xradius horizontal ellipse diameter on which ellipses are drawn
#' @param yradius vertical ellipse diameter on which ellipses are drawn
#' @param x x coordinates for hypothesis ellipses if elliptical arrangement is not wanted
#' @param y y coordinates for hypothesis ellipses if elliptical arrangement is not wanted
#' @param wchar character for alphaHypotheses in ellipses
#' @return A `ggplot` object with a multi-layer multiplicity graph
#' @examples
#' library(tidyr)
#' # Defaults: note clockwise ordering
#' hGraph(5)
#' # Add colors (default is 3 gray shades)
#' hGraph(3,fill=1:3)
#' # Colorblind palette
#' cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73",
#' "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
#' hGraph(6,fill=as.factor(1:6),palette=cbPalette)
#' # Use a hue palette
#' hGraph(4,fill=factor(1:4),palette=scales::hue_pal(l=75)(4))
#' # different alpha allocation, hypothesis names and transitions
#' alphaHypotheses <- c(.005,.007,.013)
#' nameHypotheses <- c("ORR","PFS","OS")
#' m <- matrix(c(0,1,0,
#' 0,0,1,
#' 1,0,0),nrow=3,byrow=TRUE)
#' hGraph(3,alphaHypotheses=alphaHypotheses,nameHypotheses=nameHypotheses,m=m)
#' # Custom position and size of ellipses, change text to multi-line text
#' # Adjust box width
#' # add legend in middle of plot
#' hGraph(3,x=sqrt(0:2),y=c(1,3,1.5),size=6,halfWid=.3,halfHgt=.3, trhw=0.6,
#' palette=cbPalette[2:4], fill = c(1, 2, 2),
#' legend.position = c(.6,.5), legend.name = "Legend:", labels = c("Group 1", "Group 2"),
#' nameHypotheses=c("H1:\n Long name","H2:\n Longer name","H3:\n Longest name"))
#' @details
#' See vignette **Multiplicity graphs formatting using ggplot2** for explanation of formatting.
#' @importFrom grDevices gray.colors
#' @importFrom ggplot2 aes ggplot guide_legend stat_ellipse theme theme_void geom_text geom_segment geom_rect scale_fill_manual
#' @importFrom grid unit
#' @rdname hGraph
#' @export
hGraph <- function(
nHypotheses = 4,
nameHypotheses = paste("H", (1:nHypotheses), sep = ""),
alphaHypotheses = 0.025/nHypotheses,
m = matrix(array(1/(nHypotheses - 1), nHypotheses^2),
nrow = nHypotheses) - diag(1/(nHypotheses - 1), nHypotheses),
fill = 1,
palette = grDevices::gray.colors(length(unique(fill)), start = .5, end = .8),
labels = LETTERS[1:length(unique(fill))],
legend.name = " ",
legend.position = "none",
halfWid = 0.5,
halfHgt = 0.5,
trhw = 0.1,
trhh = 0.075,
trprop = 1/3,
digits = 5,
trdigits = 2,
size = 6,
boxtextsize = 4,
arrowsize = 0.02,
radianStart = if((nHypotheses)%%2 != 0) {
pi * (1/2 + 1/nHypotheses) } else {
pi * (1 + 2/nHypotheses)/2 },
offset = pi/4/nHypotheses,
xradius = 2,
yradius = xradius,
x = NULL,
y = NULL,
wchar = if(as.character(Sys.info()[1])=="Windows"){'\u03b1'}else{'w'}
){
# Check inputs
checkHGArgs(nHypotheses, nameHypotheses, alphaHypotheses, m, fill,
palette, labels, legend, legend.name, legend.position, halfwid, halfhgt,
trhw, trhh, trprop, digits, trdigits, size, boxtextsize,
arrowsize, radianStart, offset, xradius, yradius, x, y, wchar)
# Set up hypothesis data
hData <- ellipseCenters(alphaHypotheses,
digits,
nameHypotheses,
fill = fill,
xradius = xradius,
yradius = yradius,
radianStart = radianStart,
x = x,
y = y,
wchar = wchar)
# Set up ellipse data
ellipseData <- hData %>% makeEllipseData(xradius = halfWid, yradius = halfHgt)
# Set up transition data
transitionSegments <- hData %>%
makeTransitionSegments(m, xradius = halfWid, yradius = halfHgt, offset = offset,
trprop = trprop, trdigits = trdigits, trhw = trhw, trhh = trhh)
# Layer the plot
ggplot()+
# plot ellipses
stat_ellipse(data=ellipseData,
aes(x=x, y=y, group=n, fill=as.factor(fill)),
geom="polygon") +
theme_void() +
#following should be needed
# scale_alpha(guide="none") +
scale_fill_manual(values=palette,
labels=labels,
guide_legend(legend.name)) +
theme(legend.position = legend.position) +
# Add text
geom_text(data=hData,aes(x=x,y=y,label=txt),size=size) +
# Add transition arrows
geom_segment(data = transitionSegments,
aes(x=x, y=y, xend=xend, yend=yend),
arrow = grid::arrow(length = grid::unit(arrowsize, "npc"))) +
# Add transition boxes
geom_rect(data = transitionSegments,
aes(xmin = xbmin, xmax = xbmax, ymin = ybmin, ymax = ybmax),
fill="white",color="black") +
# Add transition text
geom_text(data = transitionSegments, aes(x = xb, y = yb, label=txt), size = boxtextsize)
}
|
# One-off setup: installing from inside a script re-installs on every run;
# normally run install.packages('readxl') once interactively instead.
install.packages('readxl')
library(readxl)
# Hard-coded local path to the UK transport workbook -- adjust per machine.
d <- read_excel('/Users/juggs/Desktop/Personal/Internship/datasets/transport uk/Private vehicle.xlsx') # thousands (private vehicle)
# Drop leading header/blank rows from the imported sheet.
d <- d[-c(1,2),]
# NOTE(review): this keeps ONLY column 2, yet three column names are assigned
# on the next line; assigning 3 names to a 1-column tibble errors. Possibly
# the column index should be dropped (keep all columns) or be a range --
# confirm against the workbook layout.
d <- d[-c(1:5),2]
colnames(d) <- c('Year','Private vehicle','Motorcycles, scooters and mopeds')
# --- Series 1: private vehicles ---
private_vehicle <- d$`Private vehicle`
private_vehicle <- data.frame(private_vehicle)
# drop remaining header rows so the series starts at 1950
private_vehicle <- private_vehicle[-c(1:5),]
private_vehicle <- as.data.frame(private_vehicle)
colnames(private_vehicle) <- 'private vehicle'
private_vehicle <- as.ts(private_vehicle)
# NOTE(review): assumes as.ts() on a 1-column data frame yields a matrix-like
# object so rownames<- works; ts plotting/printing ignores these row names
# anyway -- confirm they are needed.
rownames(private_vehicle) <- c('1950','1951','1952','1953','1954','1955','1956','1957','1958','1959','1960','1961','1962','1963','1964','1965','1966','1967','1968','1969','1970','1971','1972','1973','1974','1975','1976','1977','1978','1979','1980','1981','1982','1983','1984','1985','1986','1987','1988','1989','1990','1991','1992','1993','1994','1995','1996','1997','1998','1999','2000','2001','2002','2003','2004','2005','2006','2007','2008','2009','2010','2011','2012','2013','2014','2015','2016','2017','2018')
# --- Series 2: motorcycles, scooters and mopeds (same preparation) ---
small_private_vehicle <- d$`Motorcycles, scooters and mopeds`
small_private_vehicle <- data.frame(small_private_vehicle)
small_private_vehicle <- small_private_vehicle[-c(1:5),]
small_private_vehicle <- as.data.frame(small_private_vehicle)
colnames(small_private_vehicle) <- 'motorcycles, scooters and mopeds'
small_private_vehicle <- as.ts(small_private_vehicle)
row.names(small_private_vehicle) <- c('1950','1951','1952','1953','1954','1955','1956','1957','1958','1959','1960','1961','1962','1963','1964','1965','1966','1967','1968','1969','1970','1971','1972','1973','1974','1975','1976','1977','1978','1979','1980','1981','1982','1983','1984','1985','1986','1987','1988','1989','1990','1991','1992','1993','1994','1995','1996','1997','1998','1999','2000','2001','2002','2003','2004','2005','2006','2007','2008','2009','2010','2011','2012','2013','2014','2015','2016','2017','2018')
##### Series 1: private vehicles -- ARIMA fit, forecast, hold-out check #####
# Exploratory plots. The series is annual (frequency 1), so cycle() is
# constant and the boxplot collapses to a single box -- kept for parity with
# the original analysis; confirm whether a seasonal view was intended.
boxplot(private_vehicle ~ cycle(private_vehicle))
plot(diff(log(private_vehicle)))   # differenced log series (stationarity check)
plot(log(private_vehicle))
plot(diff(private_vehicle))
acf(private_vehicle)
acf(diff(log(private_vehicle)))  # q=1 (lag before the first sign inversion)
pacf(diff(log(private_vehicle))) # p=0 (same logic as above)
# ARIMA(0,1,1) on the log scale -- main model
fit <- arima(log(private_vehicle), c(0, 1, 1), seasonal = list(order = c(0, 1, 1), period = 1))
pred <- predict(fit, n.ahead = 5)
# Back-transform log-scale forecasts with exp(); the previous 2.718^x was only
# an approximation of e^x and introduced a small systematic bias.
pred1 <- exp(pred$pred)
ts.plot(private_vehicle, pred1, log = "y", lty = c(1, 3)) # forecast vs. history
# Hold-out check: refit without the final year(s) and forecast forward
datawide <- ts(private_vehicle, frequency = 1, start = c(1950), end = c(2017))
fit1 <- arima(log(datawide), c(0, 1, 1), seasonal = list(order = c(0, 1, 1), period = 1)) # model without the last year
pred2 <- predict(fit1, n.ahead = 6)
pred3 <- exp(pred2$pred)
data <- head(pred3, 1)
predict_2018 <- round(data, digits = 0)   # forecast for the held-out year
original_2018 <- tail(private_vehicle)    # actual recent values for comparison
ts.plot(datawide, pred3, log = "y", lty = c(1, 3))
# Summary statistics of the observed series and of the forecasts
mean(private_vehicle)
sd(private_vehicle)
mean(pred1)
sd(pred1)
##################################3
boxplot(small_private_vehicle ~ cycle(small_private_vehicle))
plot(diff(log(small_private_vehicle)))
plot(log(small_private_vehicle))
plot(diff(small_private_vehicle))
acf(small_private_vehicle)
acf(diff(log(small_private_vehicle))) # q=6 (because the line before the line which first gets inverted is 1)
pacf(diff(log(small_private_vehicle)))# p=4 (logic same as above)
fit <- arima(log(small_private_vehicle),c(4,1,6),seasonal = list(order = c(0,1,1), period = 1)) # arima model main line
pred <- predict(fit, n.ahead = 5)
pred1 <- 2.718^pred$pred
ts.plot(small_private_vehicle,2.718^pred$pred, log = "y", lty = c(1,3)) #plot for prediction and previous data
#testing out model
datawide <- ts(small_private_vehicle, frequency = 1, start = c(1950),end = c(2017))
fit1 <- arima(log(datawide),c(0,1,1),seasonal = list(order = c(0,1,1),period = 1)) #second arima model without the last year
pred2 <- predict(fit1, n.ahead = 6)
pred3 <- 2.718^pred2$pred
data <- head(pred3,1)
predict_2018 <- round(data,digits = 0)
original_2018 <- tail(small_private_vehicle)
ts.plot(datawide,2.718^pred2$pred, log = "y", lty =c(1,3))
mean(small_private_vehicle)
sd(small_private_vehicle)
mean(pred1)
sd(pred1)
| /private vehicle.R | no_license | abhishekjaglan/Internship-Covid-19 | R | false | false | 4,375 | r | install.packages('readxl')
# Load the UK private-vehicle counts (in thousands) from the Excel workbook.
library(readxl)
d <- read_excel('/Users/juggs/Desktop/Personal/Internship/datasets/transport uk/Private vehicle.xlsx') # thousands (private vehicle)
# Drop the first two header rows.
d <- d[-c(1,2),]
# NOTE(review): this keeps only column 2, yet three column names are assigned
# on the next line -- on a tibble that length mismatch errors; the intended
# subset may have been d[-c(1:5), ]. Verify against the workbook layout.
d <- d[-c(1:5),2]
colnames(d) <- c('Year','Private vehicle','Motorcycles, scooters and mopeds')
# Extract the private-vehicle column and tidy it into a one-column series.
private_vehicle <- d$`Private vehicle`
private_vehicle <- data.frame(private_vehicle)
# Drop the first five rows (presumably lead-in rows before 1950 -- TODO confirm).
private_vehicle <- private_vehicle[-c(1:5),]
private_vehicle <- as.data.frame(private_vehicle)
colnames(private_vehicle) <- 'private vehicle'
# Coerce to a time series for the ARIMA analysis below.
private_vehicle <- as.ts(private_vehicle)
# Year labels (1950-2018, 69 values).
rownames(private_vehicle) <- c('1950','1951','1952','1953','1954','1955','1956','1957','1958','1959','1960','1961','1962','1963','1964','1965','1966','1967','1968','1969','1970','1971','1972','1973','1974','1975','1976','1977','1978','1979','1980','1981','1982','1983','1984','1985','1986','1987','1988','1989','1990','1991','1992','1993','1994','1995','1996','1997','1998','1999','2000','2001','2002','2003','2004','2005','2006','2007','2008','2009','2010','2011','2012','2013','2014','2015','2016','2017','2018')
# Same clean-up for the motorcycles/scooters/mopeds column.
small_private_vehicle <- d$`Motorcycles, scooters and mopeds`
small_private_vehicle <- data.frame(small_private_vehicle)
small_private_vehicle <- small_private_vehicle[-c(1:5),]
small_private_vehicle <- as.data.frame(small_private_vehicle)
colnames(small_private_vehicle) <- 'motorcycles, scooters and mopeds'
small_private_vehicle <- as.ts(small_private_vehicle)
# Year labels for the small-vehicle series (1950-2018).
row.names(small_private_vehicle) <- c('1950','1951','1952','1953','1954','1955','1956','1957','1958','1959','1960','1961','1962','1963','1964','1965','1966','1967','1968','1969','1970','1971','1972','1973','1974','1975','1976','1977','1978','1979','1980','1981','1982','1983','1984','1985','1986','1987','1988','1989','1990','1991','1992','1993','1994','1995','1996','1997','1998','1999','2000','2001','2002','2003','2004','2005','2006','2007','2008','2009','2010','2011','2012','2013','2014','2015','2016','2017','2018')
###################################################
# --- Private-vehicle series: diagnostics and ARIMA forecast ---
# Seasonal spread check (annual series, so cycle() is a single season).
boxplot(private_vehicle ~ cycle(private_vehicle))
# Stationarity checks: log-differenced, log, and plain first difference.
plot(diff(log(private_vehicle)))
plot(log(private_vehicle))
plot(diff(private_vehicle))
acf(private_vehicle)
acf(diff(log(private_vehicle))) # q=1 (lag before the ACF first changes sign)
pacf(diff(log(private_vehicle)))# p=0 (same logic as above)
# Fit ARIMA(0,1,1) to the log series.
# NOTE(review): a seasonal component with period = 1 on annual data is
# degenerate -- confirm it is intended.
fit <- arima(log(private_vehicle),c(0,1,1),seasonal = list(order = c(0,1,1), period = 1)) # arima model main line
pred <- predict(fit, n.ahead = 5)
# Back-transform with exp(): the original used 2.718^x, a truncated value
# of e that slightly biases every back-transformed forecast downward.
pred1 <- exp(pred$pred)
ts.plot(private_vehicle, exp(pred$pred), log = "y", lty = c(1,3)) # observed data plus 5-year forecast
# --- Hold-out test: refit without the most recent year and forecast it ---
datawide <- ts(private_vehicle, frequency = 1, start = c(1950),end = c(2017))
fit1 <- arima(log(datawide),c(0,1,1),seasonal = list(order = c(0,1,1),period = 1)) # second arima model without the last year
pred2 <- predict(fit1, n.ahead = 6)
pred3 <- exp(pred2$pred)
data <- head(pred3,1) # first forecast year (2018)
predict_2018 <- round(data,digits = 0)
# Single most recent observation for comparison; tail(x) with no n returned
# the last six values rather than just the 2018 one.
original_2018 <- tail(private_vehicle, 1)
ts.plot(datawide, exp(pred2$pred), log = "y", lty = c(1,3))
mean(private_vehicle)
sd(private_vehicle)
mean(pred1)
sd(pred1)
##################################3
# --- Motorcycles/scooters/mopeds series: same workflow ---
boxplot(small_private_vehicle ~ cycle(small_private_vehicle))
plot(diff(log(small_private_vehicle)))
plot(log(small_private_vehicle))
plot(diff(small_private_vehicle))
acf(small_private_vehicle)
acf(diff(log(small_private_vehicle))) # q=6 from the ACF of the log-differenced series
pacf(diff(log(small_private_vehicle)))# p=4 (same logic as above)
fit <- arima(log(small_private_vehicle),c(4,1,6),seasonal = list(order = c(0,1,1), period = 1)) # arima model main line
pred <- predict(fit, n.ahead = 5)
pred1 <- exp(pred$pred)
ts.plot(small_private_vehicle, exp(pred$pred), log = "y", lty = c(1,3)) # observed data plus 5-year forecast
# Hold-out test for the small-vehicle series.
# NOTE(review): fit1 uses order (0,1,1) here, not the (4,1,6) chosen above --
# confirm which order was intended.
datawide <- ts(small_private_vehicle, frequency = 1, start = c(1950),end = c(2017))
fit1 <- arima(log(datawide),c(0,1,1),seasonal = list(order = c(0,1,1),period = 1)) # second arima model without the last year
pred2 <- predict(fit1, n.ahead = 6)
pred3 <- exp(pred2$pred)
data <- head(pred3,1)
predict_2018 <- round(data,digits = 0)
original_2018 <- tail(small_private_vehicle, 1)
ts.plot(datawide, exp(pred2$pred), log = "y", lty = c(1,3))
mean(small_private_vehicle)
sd(small_private_vehicle)
mean(pred1)
sd(pred1)
|
# Clear the workspace and move to the CYCLES model output tree.
# NOTE(review): rm(list = ls()) and setwd() make the script machine-specific;
# consider a project-relative path instead.
rm(list = ls())
#Set working Directory
setwd("C:/Users/amr418/Desktop/CYCLESOutput")
# Create shortcut root directory name
Root.dir<- c("C:/Users/amr418/Desktop/CYCLESOutput")
# Scenario dimensions: every combination of these folder names is one
# terminal directory holding a pair of CYCLES rotation runs.
Location<-c("Lebanon","RockSpring","Beltsville");
Soil<-c("Soil1","Soil2");
Fertilizer<-c("Manure","SynFert");
Management<-c("Fallow","CC","AllRye","RyeStover","FertRye","FertRyeStover");
# Collect all seasonal data for a location in this list
results <- list()
# Walk every terminal scenario folder, read its two season files, and append
# per-crop means/SDs (tagged with the scenario labels) to `results`.
for (l in Location) {
for (k in Soil) {
for (j in Fertilizer) {
for (i in Management) {
work.dir.name<-file.path(Root.dir,l,k,j,i);
#Grab season data from 2 season files output from 2 rotations (corn-soybean, soybean-corn)
season_data_1<-read.table(file.path(work.dir.name,"season.dat"),header = TRUE, skip = 1);
season_data_2<-read.table(file.path(work.dir.name,"season2.dat"),header = TRUE, skip = 1);
# Stack both rotations into one table and give it readable column names.
season_data<- rbind(season_data_1, season_data_2);
colnames(season_data) <- c("DATE","CROP","TOTAL_BIOMASS","ROOT_BIOMASS","GRAIN","FORAGE","RESIDUE","HI","POT_TRANS","ACT_TRANS","SOIL_EVAP","TOTAL_N","ROOT_N","GRAIN_N","FORAGE_N","N_STRESS","N_HARVEST","N_RESIDUE","%N_FORAGE");
#### Calculate average and standard deviation in output for each crop
# (column 2 is CROP; columns 3+ are the numeric outputs).
subset_maize <- season_data[which(season_data[,2] == "Maize"),3:ncol(season_data)]
maize_avg <- apply(subset_maize,2,mean,na.rm = TRUE)
maize_sd <- apply(subset_maize,2,sd,na.rm=TRUE)
subset_soybean <- season_data[which(season_data[,2] == "Soybean"),3:ncol(season_data)]
soybean_avg <- apply(subset_soybean,2,mean,na.rm = TRUE)
soybean_sd <- apply(subset_soybean,2,sd,na.rm=TRUE)
subset_rye_bm <- season_data[which(season_data[,2] == "RyeBioenergyBM"),3:ncol(season_data)]
rye_bm_avg <- apply(subset_rye_bm,2,mean,na.rm = TRUE)
rye_bm_sd <- apply(subset_rye_bm,2,sd,na.rm = TRUE)
subset_rye_bs <- season_data[which(season_data[,2] == "RyeBioenergyBS"),3:ncol(season_data)]
rye_bs_avg <- apply(subset_rye_bs,2,mean,na.rm = TRUE)
rye_bs_sd <- apply(subset_rye_bs,2,sd,na.rm = TRUE)
subset_triticale <- season_data[which(season_data[,2] == "Triticale"),3:ncol(season_data)]
triticale_avg <- apply(subset_triticale,2,mean,na.rm = TRUE)
triticale_sd <- apply(subset_triticale,2,sd,na.rm = TRUE)
# Stack the per-crop means and SDs into one data frame, one row per statistic.
crop_avg = data.frame(rbind(maize_avg,soybean_avg, rye_bm_avg, rye_bs_avg, triticale_avg,maize_sd,soybean_sd, rye_bm_sd, rye_bs_sd, triticale_sd, row.names = NULL));
rownames(crop_avg) <- c("maize","soybean", "rye_bm","rye_bs","triticale","maize_sd","soybean_sd","rye_bm_sd","rye_bs_sd","triticale_sd");
# Tag every row with the scenario labels so rows remain identifiable after rbind.
crop_avg$Location<-rep(paste(l),length(row.names(crop_avg)));
crop_avg$Soil<-rep(paste(k),length(row.names(crop_avg)));
crop_avg$Fertilizer<-rep(paste(j),length(row.names(crop_avg)));
crop_avg$Management<-rep(paste(i),length(row.names(crop_avg)));
results <- rbind(results,crop_avg)
# Re-written after every scenario; the file is overwritten each iteration so
# partial results survive a crash, at the cost of repeated I/O.
write.csv(results,"season_test.csv")
}
}
}
}
#####
#SUMMARY
#####
# Collect all season data for a location in this list
# NOTE(review): the name `summary` shadows base::summary() for the rest of
# the session.
summary <- list()
# Walk every terminal scenario folder again and average the two fixed rows of
# the carbon/nitrogen summary files from both rotations.
for (l in Location) {
for (k in Soil) {
for (j in Fertilizer) {
for (i in Management) {
work.dir.name<-file.path(Root.dir,l,k,j,i);
#Grab season data from 2 season files output from 2 rotations (corn-soybean, soybean-corn)
# Rows at skip=2 and skip=6 hold the carbon and nitrogen summaries -- TODO
# confirm against the summary.dat layout.
summary_data_1<-read.table(file.path(work.dir.name,"summary.dat"), skip = 2,nrows = 1);
summary_data_2<-read.table(file.path(work.dir.name,"summary2.dat"), skip = 2, nrows = 1);
summary_data_3<-read.table(file.path(work.dir.name,"summary.dat"), skip = 6, nrows = 1);
summary_data_4<-read.table(file.path(work.dir.name,"summary2.dat"), skip = 6, nrows = 1);
# Combine rows from different rotations
Row1 <- rbind(summary_data_1[1,],summary_data_2[1,])
Row2 <- rbind(summary_data_3[1,],summary_data_4[1,])
# Combine columns from differe rotations
Row_all <-cbind(Row1,Row2)
# Average and standard deviation of Data
summary_avg <- apply(Row_all,2,mean,na.rm = TRUE)
summary_sd <- apply(Row_all,2,sd,na.rm = TRUE)
crop_summary = data.frame(rbind(summary_avg,summary_sd))
colnames(crop_summary) <- c("Init_Prof_C","Fin_Prof_C","Prof_C_Diff", "Res_C_Input", "Root_C_Input","Hum_C", "Resp_C", "Resp_Res_C", "Ret_Res","Prod_Root", "Soil_C_Ch_per_yr","Avg_Gross_N_Min","Avg_N_Imm", "Avg_Net_Min","Avg_NH4_Nitr","Avg_N20_Nitr", "Avg_NH3_Vol","Avg_NO3_Denit","Avg_N2O_Denit","Avg_Nit_Leach","Avg_Amm_Leach","Avg_Total_N20_Emm");
# Tag rows with the scenario labels before appending.
crop_summary$Location<-rep(paste(l),length(row.names(crop_summary)));
crop_summary$Soil<-rep(paste(k),length(row.names(crop_summary)));
crop_summary$Fertilizer<-rep(paste(j),length(row.names(crop_summary)));
crop_summary$Management<-rep(paste(i),length(row.names(crop_summary)));
summary <- rbind(summary,crop_summary)
# Overwritten each iteration so partial results survive a crash.
write.csv(summary,"scenario_test.csv")
}
}
}
}
| /CyclesDataSummary.R | no_license | aramcharan/CNCycling | R | false | false | 5,467 | r | #Clear lists
# Clear the workspace and move to the CYCLES model output tree.
# NOTE(review): rm(list = ls()) and setwd() make the script machine-specific;
# consider a project-relative path instead.
rm(list = ls())
#Set working Directory
setwd("C:/Users/amr418/Desktop/CYCLESOutput")
# Create shortcut root directory name
Root.dir<- c("C:/Users/amr418/Desktop/CYCLESOutput")
# Scenario dimensions: every combination of these folder names is one
# terminal directory holding a pair of CYCLES rotation runs.
Location<-c("Lebanon","RockSpring","Beltsville");
Soil<-c("Soil1","Soil2");
Fertilizer<-c("Manure","SynFert");
Management<-c("Fallow","CC","AllRye","RyeStover","FertRye","FertRyeStover");
# Collect all seasonal data for a location in this list
results <- list()
# Walk every terminal scenario folder, read its two season files, and append
# per-crop means/SDs (tagged with the scenario labels) to `results`.
for (l in Location) {
for (k in Soil) {
for (j in Fertilizer) {
for (i in Management) {
work.dir.name<-file.path(Root.dir,l,k,j,i);
#Grab season data from 2 season files output from 2 rotations (corn-soybean, soybean-corn)
season_data_1<-read.table(file.path(work.dir.name,"season.dat"),header = TRUE, skip = 1);
season_data_2<-read.table(file.path(work.dir.name,"season2.dat"),header = TRUE, skip = 1);
# Stack both rotations into one table and give it readable column names.
season_data<- rbind(season_data_1, season_data_2);
colnames(season_data) <- c("DATE","CROP","TOTAL_BIOMASS","ROOT_BIOMASS","GRAIN","FORAGE","RESIDUE","HI","POT_TRANS","ACT_TRANS","SOIL_EVAP","TOTAL_N","ROOT_N","GRAIN_N","FORAGE_N","N_STRESS","N_HARVEST","N_RESIDUE","%N_FORAGE");
#### Calculate average and standard deviation in output for each crop
# (column 2 is CROP; columns 3+ are the numeric outputs).
subset_maize <- season_data[which(season_data[,2] == "Maize"),3:ncol(season_data)]
maize_avg <- apply(subset_maize,2,mean,na.rm = TRUE)
maize_sd <- apply(subset_maize,2,sd,na.rm=TRUE)
subset_soybean <- season_data[which(season_data[,2] == "Soybean"),3:ncol(season_data)]
soybean_avg <- apply(subset_soybean,2,mean,na.rm = TRUE)
soybean_sd <- apply(subset_soybean,2,sd,na.rm=TRUE)
subset_rye_bm <- season_data[which(season_data[,2] == "RyeBioenergyBM"),3:ncol(season_data)]
rye_bm_avg <- apply(subset_rye_bm,2,mean,na.rm = TRUE)
rye_bm_sd <- apply(subset_rye_bm,2,sd,na.rm = TRUE)
subset_rye_bs <- season_data[which(season_data[,2] == "RyeBioenergyBS"),3:ncol(season_data)]
rye_bs_avg <- apply(subset_rye_bs,2,mean,na.rm = TRUE)
rye_bs_sd <- apply(subset_rye_bs,2,sd,na.rm = TRUE)
subset_triticale <- season_data[which(season_data[,2] == "Triticale"),3:ncol(season_data)]
triticale_avg <- apply(subset_triticale,2,mean,na.rm = TRUE)
triticale_sd <- apply(subset_triticale,2,sd,na.rm = TRUE)
# Stack the per-crop means and SDs into one data frame, one row per statistic.
crop_avg = data.frame(rbind(maize_avg,soybean_avg, rye_bm_avg, rye_bs_avg, triticale_avg,maize_sd,soybean_sd, rye_bm_sd, rye_bs_sd, triticale_sd, row.names = NULL));
rownames(crop_avg) <- c("maize","soybean", "rye_bm","rye_bs","triticale","maize_sd","soybean_sd","rye_bm_sd","rye_bs_sd","triticale_sd");
# Tag every row with the scenario labels so rows remain identifiable after rbind.
crop_avg$Location<-rep(paste(l),length(row.names(crop_avg)));
crop_avg$Soil<-rep(paste(k),length(row.names(crop_avg)));
crop_avg$Fertilizer<-rep(paste(j),length(row.names(crop_avg)));
crop_avg$Management<-rep(paste(i),length(row.names(crop_avg)));
results <- rbind(results,crop_avg)
# Re-written after every scenario; the file is overwritten each iteration so
# partial results survive a crash, at the cost of repeated I/O.
write.csv(results,"season_test.csv")
}
}
}
}
#####
#SUMMARY
#####
# Collect all season data for a location in this list
# NOTE(review): the name `summary` shadows base::summary() for the rest of
# the session.
summary <- list()
# Walk every terminal scenario folder again and average the two fixed rows of
# the carbon/nitrogen summary files from both rotations.
for (l in Location) {
for (k in Soil) {
for (j in Fertilizer) {
for (i in Management) {
work.dir.name<-file.path(Root.dir,l,k,j,i);
#Grab season data from 2 season files output from 2 rotations (corn-soybean, soybean-corn)
# Rows at skip=2 and skip=6 hold the carbon and nitrogen summaries -- TODO
# confirm against the summary.dat layout.
summary_data_1<-read.table(file.path(work.dir.name,"summary.dat"), skip = 2,nrows = 1);
summary_data_2<-read.table(file.path(work.dir.name,"summary2.dat"), skip = 2, nrows = 1);
summary_data_3<-read.table(file.path(work.dir.name,"summary.dat"), skip = 6, nrows = 1);
summary_data_4<-read.table(file.path(work.dir.name,"summary2.dat"), skip = 6, nrows = 1);
# Combine rows from different rotations
Row1 <- rbind(summary_data_1[1,],summary_data_2[1,])
Row2 <- rbind(summary_data_3[1,],summary_data_4[1,])
# Combine columns from differe rotations
Row_all <-cbind(Row1,Row2)
# Average and standard deviation of Data
summary_avg <- apply(Row_all,2,mean,na.rm = TRUE)
summary_sd <- apply(Row_all,2,sd,na.rm = TRUE)
crop_summary = data.frame(rbind(summary_avg,summary_sd))
colnames(crop_summary) <- c("Init_Prof_C","Fin_Prof_C","Prof_C_Diff", "Res_C_Input", "Root_C_Input","Hum_C", "Resp_C", "Resp_Res_C", "Ret_Res","Prod_Root", "Soil_C_Ch_per_yr","Avg_Gross_N_Min","Avg_N_Imm", "Avg_Net_Min","Avg_NH4_Nitr","Avg_N20_Nitr", "Avg_NH3_Vol","Avg_NO3_Denit","Avg_N2O_Denit","Avg_Nit_Leach","Avg_Amm_Leach","Avg_Total_N20_Emm");
# Tag rows with the scenario labels before appending.
crop_summary$Location<-rep(paste(l),length(row.names(crop_summary)));
crop_summary$Soil<-rep(paste(k),length(row.names(crop_summary)));
crop_summary$Fertilizer<-rep(paste(j),length(row.names(crop_summary)));
crop_summary$Management<-rep(paste(i),length(row.names(crop_summary)));
summary <- rbind(summary,crop_summary)
# Overwritten each iteration so partial results survive a crash.
write.csv(summary,"scenario_test.csv")
}
}
}
}
|
#' Calculate population dynamics from MP recommendation
#'
#' An internal function to calculate the population dynamics for the next time
#' step based on the recent MP recommendation
#'
#' @param MPRecs A named list of MP recommendations. The names are the same as `slotNames('Rec')`, except
#' for `Misc`. Each element in the list is a matrix. With the expection of `Spatial`, all elements in list
#' have `nrow=1` and `ncol=nsim`. `Spatial` has `nrow=nareas`. Matrices can be empty matrix, populated with all NAs
#' (both mean no change in management with respect to this element (e.g. `Effort`)), or populated with a recommendation.
#' MPs must either return a recommendation or no recommendation for every simulation for a particular slot (i.e. cannot have some NA and some values).
#' @param y The projection year
#' @param nyears The number of historical years
#' @param proyears The number of projection years
#' @param nsim The number of simulations
#' @param Biomass_P An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with total biomass in the projection years
#' @param VBiomass_P An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with vulnerable biomass in the projection years
#' @param LastTAE A vector of length `nsim` with the most recent TAE
#' @param LastSpatial A matrix of `nrow=nareas` and `ncol=nsim` with the most recent spatial management arrangements
#' @param LastAllocat A vector of length `nsim` with the most recent allocation
#' @param LastTAC A vector of length `nsim` with the most recent TAC
#' @param TACused A vector of length `nsim` with the most recent TAC
#' @param maxF A numeric value with maximum allowed F. From `OM@maxF`
#' @param LR5_P A matrix with `nyears+proyears` rows and `nsim` columns with the first length at 5 percent retention.
#' @param LFR_P A matrix with `nyears+proyears` rows and `nsim` columns with the first length at full retention.
#' @param Rmaxlen_P A matrix with `nyears+proyears` rows and `nsim` columns with the retention at maximum length.
#' @param retL_P An array with dimensions `nsim`, `nCALbins` and `nyears+proyears` with retention at length
#' @param retA_P An array with dimensions `nsim`, `maxage` and `nyears+proyears` with retention at age
#' @param L5_P A matrix with `nyears+proyears` rows and `nsim` columns with the first length at 5 percent selectivity
#' @param LFS_P A matrix with `nyears+proyears` rows and `nsim` columns with the first length at full selectivity
#' @param Vmaxlen_P A matrix with `nyears+proyears` rows and `nsim` columns with the selectivity at maximum length.
#' @param SLarray_P An array with dimensions `nsim`, `nCALbins` and `nyears+proyears` with selectivity at length
#' @param V_P An array with dimensions `nsim`, `maxage` and `nyears+proyears` with selectivity at age
#' @param Fdisc_P vector of length `nsim` with discard mortality. From `OM@Fdisc` but can be updated by MP (`Rec@Fdisc`)
#' @param DR_P A matrix with `nyears+proyears` rows and `nsim` columns with the fraction discarded.
#' @param M_ageArray An array with dimensions `nsim`, `maxage` and `nyears+proyears` with natural mortality at age
#' @param FM_P An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with total fishing mortality
#' @param FM_Pret An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with fishing mortality of the retained fish
#' @param Z_P An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with total mortality
#' @param CB_P An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with total catch
#' @param CB_Pret An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with retained catch
#' @param TAC_f A matrix with `nsim` rows and `proyears` columns with the TAC implementation error
#' @param E_f A matrix with `nsim` rows and `proyears` columns with the effort implementation error
#' @param SizeLim_f A matrix with `nsim` rows and `proyears` columns with the size limit implementation error
#' @param FinF A numeric vector of length `nsim` with fishing mortality in the last historical year
#' @param Spat_targ A numeric vector of length `nsim` with spatial targeting
#' @param CAL_binsmid A numeric vector of length `nCALbins` with mid-points of the CAL bins
#' @param Linf A numeric vector of length `nsim` with Linf (from `Stock@Linf`)
#' @param Len_age An array with dimensions `nsim`, `maxage`, and `nyears+proyears` with length-at-age
#' @param maxage A numeric value with maximum age from `Stock@maxage`
#' @param nareas A numeric value with number of areas
#' @param Asize A matrix with `nsim` rows and `nareas` columns with the relative size of each area
#' @param nCALbins The number of CAL bins. Should be the same as `length(CAL_binsmid)`
#' @param qs A numeric vector of length `nsim` with catchability coefficient
#' @param qvar A matrix with `nsim` rows and `proyears` columns with catchability variability
#' @param qinc A numeric vector of length `nsim` with average annual change in catchability
#' @param Effort_pot A numeric vector of potential effort
#' @param checks Logical. Run internal checks? Currently not used.
#'
#' @return A named list with updated population dynamics
#' @author A. Hordyk
#' @export
#'
#' @keywords internal
CalcMPDynamics <- function(MPRecs, y, nyears, proyears, nsim, Biomass_P,
VBiomass_P,
LastTAE, histTAE, LastSpatial, LastAllocat, LastTAC,
TACused, maxF,
LR5_P, LFR_P, Rmaxlen_P, retL_P, retA_P,
L5_P, LFS_P, Vmaxlen_P, SLarray_P, V_P,
Fdisc_P, DR_P,
M_ageArray, FM_P, FM_Pret, Z_P, CB_P, CB_Pret,
TAC_f, E_f, SizeLim_f,
FinF, Spat_targ,
CAL_binsmid, Linf, Len_age, maxage, nareas, Asize, nCALbins,
qs, qvar, qinc,
Effort_pot,
checks=FALSE) {
# Effort
if (length(MPRecs$Effort) == 0) { # no max effort recommendation
if (y==1) TAE <- LastTAE * E_f[,y] # max effort is unchanged but has implementation error
if (y>1) TAE <- LastTAE / E_f[,y-1] * E_f[,y] # max effort is unchanged but has implementation error
} else if (length(MPRecs$Effort) != nsim) {
stop("Effort recommmendation is not 'nsim' long.\n Does MP return Effort recommendation under all conditions?")
} else {
# a maximum effort recommendation
if (!all(is.na(histTAE))) {
TAE <- histTAE * MPRecs$Effort * E_f[,y] # adjust existing TAE adjustment with implementation error
} else {
TAE <- MPRecs$Effort * E_f[,y] # adjust existing TAE adjustment with implementation error
}
}
# Spatial
if (all(is.na(MPRecs$Spatial))) { # no spatial recommendation
Si <- LastSpatial # spatial is unchanged
} else if (any(is.na(MPRecs$Spatial))) {
stop("Spatial recommmendation has some NAs.\n Does MP return Spatial recommendation under all conditions?")
} else {
Si <- MPRecs$Spatial # change spatial fishing
}
if (all(dim(Si) != c(nareas, nsim))) stop("Spatial recommmendation not nareas long")
# Allocation
if (length(MPRecs$Allocate) == 0) { # no allocation recommendation
Ai <- LastAllocat # allocation is unchanged
} else if (length(MPRecs$Allocate) != nsim) {
stop("Allocate recommmendation is not 'nsim' long.\n Does MP return Allocate recommendation under all conditions?")
} else {
Ai <- MPRecs$Allocate # change in spatial allocation
}
Ai <- as.numeric(Ai)
# Retention Curve
RetentFlag <- FALSE # should retention curve be updated for future years?
# LR5
if (length(MPRecs$LR5) == 0) { # no recommendation
LR5_P[(y + nyears):(nyears+proyears),] <- matrix(LR5_P[y + nyears-1,],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # unchanged
} else if (length(MPRecs$LR5) != nsim) {
stop("LR5 recommmendation is not 'nsim' long.\n Does MP return LR5 recommendation under all conditions?")
} else {
LR5_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$LR5 * SizeLim_f[,y],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # recommendation with implementation error
RetentFlag <- TRUE
}
# LFR
if (length(MPRecs$LFR) == 0) { # no recommendation
LFR_P[(y + nyears):(nyears+proyears),] <- matrix(LFR_P[y + nyears-1,],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # unchanged
} else if (length(MPRecs$LFR) != nsim) {
stop("LFR recommmendation is not 'nsim' long.\n Does MP return LFR recommendation under all conditions?")
} else {
LFR_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$LFR * SizeLim_f[,y],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # recommendation with implementation error
RetentFlag <- TRUE
}
# Rmaxlen
if (length(MPRecs$Rmaxlen) == 0) { # no recommendation
Rmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(Rmaxlen_P[y + nyears-1,],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # unchanged
} else if (length(MPRecs$Rmaxlen) != nsim) {
stop("Rmaxlen recommmendation is not 'nsim' long.\n Does MP return Rmaxlen recommendation under all conditions?")
} else {
Rmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$Rmaxlen,
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # recommendation
RetentFlag <- TRUE
}
# HS - harvest slot
if (length(MPRecs$HS) == 0) { # no recommendation
HS <- rep(1E5, nsim) # no harvest slot
} else if (length(MPRecs$HS) != nsim) {
stop("HS recommmendation is not 'nsim' long.\n Does MP return HS recommendation under all conditions?")
} else {
HS <- MPRecs$HS * SizeLim_f[,y] # recommendation
RetentFlag <- TRUE
}
# Selectivity Curve
SelectFlag <- FALSE # has selectivity been updated?
# L5
if (length(MPRecs$L5) == 0) { # no recommendation
L5_P[(y + nyears):(nyears+proyears),] <- matrix(L5_P[y + nyears-1,],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # unchanged
} else if (length(MPRecs$L5) != nsim) {
stop("L5 recommmendation is not 'nsim' long.\n Does MP return L5 recommendation under all conditions?")
} else {
L5_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$L5 * SizeLim_f[,y],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # recommendation with implementation error
SelectFlag <- TRUE
}
# LFS
if (length(MPRecs$LFS) == 0) { # no recommendation
LFS_P[(y + nyears):(nyears+proyears),] <- matrix(LFS_P[y + nyears-1,],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # unchanged
} else if (length(MPRecs$LFS) != nsim) {
stop("LFS recommmendation is not 'nsim' long.\n Does MP return LFS recommendation under all conditions?")
} else {
LFS_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$LFS * SizeLim_f[,y],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # recommendation with implementation error
SelectFlag <- TRUE
}
# Vmaxlen
if (length(MPRecs$Rmaxlen) == 0) { # no recommendation
Vmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(Vmaxlen_P[y + nyears-1,],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # unchanged
} else if (length(MPRecs$Rmaxlen) != nsim) {
stop("Rmaxlen recommmendation is not 'nsim' long.\n Does MP return Rmaxlen recommendation under all conditions?")
} else {
Vmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$Vmaxlen,
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # recommendation
SelectFlag <- TRUE
}
# Discard Mortality
if (length(MPRecs$Fdisc) >0) { # Fdisc has changed
if (length(MPRecs$Fdisc) != nsim) stop("Fdisc recommmendation is not 'nsim' long.\n Does MP return Fdisc recommendation under all conditions?")
Fdisc_P <- MPRecs$Fdisc
}
# Discard Ratio
if (length(MPRecs$DR)>0) { # DR has changed
if (length(MPRecs$DR) != nsim) stop("DR recommmendation is not 'nsim' long.\n Does MP return DR recommendation under all conditions?")
DR_P[(y+nyears):(nyears+proyears),] <- matrix(MPRecs$DR, nrow=length((y+nyears):(nyears+proyears)), ncol=nsim, byrow=TRUE)
}
# Update Selectivity and Retention Curve
if (SelectFlag | RetentFlag) {
yr <- y+nyears
allyrs <- (y+nyears):(nyears+proyears) # update vulnerabilty for all future years
srs <- (Linf - LFS_P[yr,]) / ((-log(Vmaxlen_P[yr,],2))^0.5) # descending limb
srs[!is.finite(srs)] <- Inf
sls <- (LFS_P[yr,] - L5_P[yr,]) / ((-log(0.05,2))^0.5) # ascending limb
CAL_binsmidMat <- matrix(CAL_binsmid, nrow=nsim, ncol=length(CAL_binsmid), byrow=TRUE)
selLen <- t(sapply(1:nsim, getsel, lens=CAL_binsmidMat, lfs=LFS_P[yr,], sls=sls, srs=srs))
for (yy in allyrs) {
# calculate new selectivity at age curve
V_P[ , , yy] <- t(sapply(1:nsim, getsel, lens=Len_age[,,yy], lfs=LFS_P[yy,], sls=sls, srs=srs))
SLarray_P[,, yy] <- selLen # calculate new selectivity at length curve
}
# sim <- 158
# plot(CAL_binsmid, selLen[sim,], type="b")
# lines(c(L5_P[yr,sim], L5_P[yr,sim]), c(0, 0.05), lty=2)
# lines(c(LFS_P[yr,sim], LFS_P[yr,sim]), c(0, 1), lty=2)
# lines(c(Linf[sim], Linf[sim]), c(0, Vmaxlen_P[yr,sim]), lty=2)
# calculate new retention curve
yr <- y+nyears
allyrs <- (y+nyears):(nyears+proyears) # update vulnerabilty for all future years
srs <- (Linf - LFR_P[yr,]) / ((-log(Rmaxlen_P[yr,],2))^0.5) # selectivity parameters are constant for all years
srs[!is.finite(srs)] <- Inf
sls <- (LFR_P[yr,] - LR5_P[yr,]) / ((-log(0.05,2))^0.5)
CAL_binsmidMat <- matrix(CAL_binsmid, nrow=nsim, ncol=length(CAL_binsmid), byrow=TRUE)
relLen <- t(sapply(1:nsim, getsel, lens=CAL_binsmidMat, lfs=LFR_P[yr,], sls=sls, srs=srs))
for (yy in allyrs) {
# calculate new retention at age curve
retA_P[ , , yy] <- t(sapply(1:nsim, getsel, lens=Len_age[,,yy], lfs=LFR_P[yy,], sls=sls, srs=srs))
retL_P[,, yy] <- relLen # calculate new retention at length curve
}
# upper harvest slot
aboveHS <- Len_age[,,allyrs, drop=FALSE]>array(HS, dim=c(nsim, maxage, length(allyrs)))
tretA_P <- retA_P[,,allyrs]
tretA_P[aboveHS] <- 0
retA_P[,,allyrs] <- tretA_P
for (ss in 1:nsim) {
index <- which(CAL_binsmid >= HS[ss])
retL_P[ss, index, allyrs] <- 0
}
dr <- aperm(abind::abind(rep(list(DR_P), maxage), along=3), c(2,3,1))
retA_P[,,allyrs] <- (1-dr[,,yr]) * retA_P[,,yr]
dr <- aperm(abind::abind(rep(list(DR_P), nCALbins), along=3), c(2,3,1))
retL_P[,,allyrs] <- (1-dr[,,yr]) * retL_P[,,yr]
# update realized vulnerablity curve with retention and dead discarded fish
Fdisc_array1 <- array(Fdisc_P, dim=c(nsim, maxage, length(allyrs)))
V_P[,,allyrs] <- V_P[,,allyrs, drop=FALSE] * (retA_P[,,allyrs, drop=FALSE] + (1-retA_P[,,allyrs, drop=FALSE])*Fdisc_array1)
Fdisc_array2 <- array(Fdisc_P, dim=c(nsim, nCALbins, length(allyrs)))
SLarray_P[,,allyrs] <- SLarray_P[,,allyrs, drop=FALSE] * (retL_P[,,allyrs, drop=FALSE]+ (1-retL_P[,,allyrs, drop=FALSE])*Fdisc_array2)
# Realised Retention curves
retA_P[,,allyrs] <- retA_P[,,allyrs] * V_P[,,allyrs]
retL_P[,,allyrs] <- retL_P[,,allyrs] * SLarray_P[,,allyrs]
}
CurrentB <- Biomass_P[,,y,] # biomass at the beginning of year
CurrentVB <- array(NA, dim=dim(CurrentB))
Catch_tot <- Catch_retain <- array(NA, dim=dim(CurrentB)) # catch this year arrays
FMc <- Zc <- array(NA, dim=dim(CurrentB)) # fishing and total mortality this year
# indices
SAYRL <- as.matrix(expand.grid(1:nsim, 1:maxage, nyears, 1:nareas)) # Final historical year
SAYRt <- as.matrix(expand.grid(1:nsim, 1:maxage, y + nyears, 1:nareas)) # Trajectory year
SAYR <- as.matrix(expand.grid(1:nsim, 1:maxage, y, 1:nareas))
SAR <- SAYR[, c(1,2,4)]
SAY <- SAYR[,c(1:3)]
SYt <- SAYRt[, c(1, 3)]
SAYt <- SAYRt[, 1:3]
SR <- SAYR[, c(1, 4)]
SA1 <- SAYR[, 1:2]
S1 <- SAYR[, 1]
SY1 <- SAYR[, c(1, 3)]
SAY1 <- SAYR[, 1:3]
SYA <- as.matrix(expand.grid(1:nsim, 1, 1:maxage)) # Projection year
SY <- SYA[, 1:2]
SA <- SYA[, c(1, 3)]
S <- SYA[, 1]
CurrentVB[SAR] <- CurrentB[SAR] * V_P[SAYt] # update available biomass if selectivity has changed
# Calculate fishing distribution if all areas were open
newVB <- apply(CurrentVB, c(1,3), sum) # calculate total vuln biomass by area
fishdist <- (newVB^Spat_targ)/apply(newVB^Spat_targ, 1, sum) # spatial preference according to spatial vulnerable biomass
d1 <- t(Si) * fishdist # distribution of fishing effort
fracE <- apply(d1, 1, sum) # fraction of current effort in open areas
fracE2 <- d1 * (fracE + (1-fracE) * Ai)/fracE # re-distribution of fishing effort accounting for re-allocation of effort
fishdist <- fracE2 # fishing effort by area
# ---- no TAC - calculate F with bio-economic effort ----
if (all(is.na(TACused))) {
if (all(is.na(Effort_pot)) & all(is.na(TAE))) Effort_pot <- rep(1, nsim) # historical effort
if (all(is.na(Effort_pot))) Effort_pot <- TAE[1,]
# fishing mortality with bio-economic effort
FM_P[SAYR] <- (FinF[S1] * Effort_pot[S1] * V_P[SAYt] * t(Si)[SR] * fishdist[SR] *
qvar[SY1] * (qs[S1]*(1 + qinc[S1]/100)^y))/Asize[SR]
# retained fishing mortality with bio-economic effort
FM_Pret[SAYR] <- (FinF[S1] * Effort_pot[S1] * retA_P[SAYt] * t(Si)[SR] * fishdist[SR] *
qvar[SY1] * qs[S1]*(1 + qinc[S1]/100)^y)/Asize[SR]
}
# ---- calculate required F and effort for TAC recommendation ----
if (!all(is.na(TACused))) { # a TAC has been set
# if MP returns NA - TAC is set to TAC from last year
TACused[is.na(TACused)] <- LastTAC[is.na(TACused)]
TACusedE <- TAC_f[,y]*TACused # TAC taken after implementation error
# Calculate total vulnerable biomass available mid-year accounting for any changes in selectivity &/or spatial closures
M_array <- array(0.5*M_ageArray[,,nyears+y], dim=c(nsim, maxage, nareas))
Atemp <- apply(CurrentVB * exp(-M_array), c(1,3), sum) # mid-year before fishing
availB <- apply(Atemp * t(Si), 1, sum) # adjust for spatial closures
# Calculate total F (using Steve Martell's approach http://api.admb-project.org/baranov_8cpp_source.html)
expC <- TACusedE
expC[TACusedE> availB] <- availB[TACusedE> availB] * 0.99
Ftot <- sapply(1:nsim, calcF, expC, V_P, Biomass_P, fishdist, Asize, maxage, nareas,
M_ageArray,nyears, y)
# apply max F constraint
Ftot[Ftot<0] <- maxF
Ftot[!is.finite(Ftot)] <- maxF
Ftot[Ftot>maxF] <- maxF
# Calculate F & Z by age class
FM_P[SAYR] <- Ftot[S] * V_P[SAYt] * fishdist[SR]/Asize[SR]
FM_Pret[SAYR] <- Ftot[S] * retA_P[SAYt] * fishdist[SR]/Asize[SR]
Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
# Calculate total and retained catch
CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * (1-exp(-Z_P[SAYR])) * Biomass_P[SAYR]
CB_Pret[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * (1-exp(-Z_P[SAYR])) * Biomass_P[SAYR]
# Calculate total removals when CB_Pret == TAC - total removal > retained when discarding
actualremovals <- apply(CB_P[,,y,], 1, sum)
retained <- apply(CB_Pret[,,y,], 1, sum)
ratio <- actualremovals/retained # ratio of actual removals to retained catch
ratio[!is.finite(ratio)] <- 0
ratio[ratio>1E5] <- 1E5
temp <- CB_Pret[,,y,]/apply(CB_Pret[,,y,], 1, sum) # distribution by age & area of retained fish
Catch_retain <- TACusedE * temp # retained catch
Catch_tot <- CB_P[,,y,]/apply(CB_P[,,y,], 1, sum) # distribution by age & area of caught fish
temp <- Catch_tot/apply(Catch_tot, 1, sum) # distribution of removals
Catch_tot <- TACusedE * ratio * temp # scale up total removals (if applicable)
# total removals can't be more than available biomass
chk <- apply(Catch_tot, 1, sum) > availB
if (sum(chk)>0) {
c_temp <- apply(Catch_tot[chk,,, drop=FALSE], 1, sum)
ratio_temp <- (availB[chk]/c_temp) * 0.99
# scale total catches to 0.99 available biomass
if (sum(chk)>1) Catch_tot[chk,, ] <- Catch_tot[chk,,] * array(ratio_temp, dim=c(sum(chk), maxage, nareas))
if (sum(chk)==1) Catch_tot[chk,, ] <- Catch_tot[chk,,] * array(ratio_temp, dim=c(maxage, nareas))
}
# check where actual catches are higher than TAC due to discarding (with imp error)
ind <- which(apply(Catch_tot, 1, sum) > TACusedE)
if (length(ind)>0) {
# update Ftot calcs
Ftot[ind] <- sapply(ind, calcF, TACusedE, V_P, Biomass_P, fishdist, Asize,
maxage, nareas, M_ageArray,nyears, y)
}
# Calculate F & Z by age class
FM_P[SAYR] <- Ftot[S] * V_P[SAYt] * fishdist[SR]/Asize[SR]
FM_Pret[SAYR] <- Ftot[S] * retA_P[SAYt] * fishdist[SR]/Asize[SR]
Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
}
# Apply maxF constraint
FM_P[SAYR][FM_P[SAYR] > maxF] <- maxF
FM_Pret[SAYR][FM_Pret[SAYR] > maxF] <- maxF
Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
# Update catches after maxF constraint
CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * (1-exp(-Z_P[SAYR])) * Biomass_P[SAYR]
CB_Pret[SAYR] <- FM_Pret[SAYR]/Z_P[SAYR] * (1-exp(-Z_P[SAYR])) * Biomass_P[SAYR]
# Effort_req - effort required to catch TAC
# Effort_pot - potential effort this year (active fishers) from bio-economic model
# Effort_act - actual effort this year
# TAE - maximum actual effort limit
# Effort_act < Effort_pot if Effort_req < Effort_pot
# Calculate total F (using Steve Martell's approach http://api.admb-project.org/baranov_8cpp_source.html)
totalCatch <- apply(CB_P[,,y,], 1, sum)
Ftot <- sapply(1:nsim, calcF, totalCatch, V_P, Biomass_P, fishdist, Asize, maxage, nareas,
M_ageArray,nyears, y)
# Effort relative to last historical with this potential catch
Effort_req <- Ftot/(FinF * qs*qvar[,y]* (1 + qinc/100)^y) * apply(fracE2, 1, sum) # effort required for this catch
# Limit effort to potential effort from bio-economic model
Effort_act <- Effort_req
if (!all(is.na(Effort_pot))) {
excessEff <- Effort_req>Effort_pot # simulations where required effort > potential effort
Effort_act[excessEff] <- Effort_pot[excessEff] # actual effort can't be more than bio-economic effort
}
# Limit actual effort <= TAE
if (!all(is.na(TAE))) { # a TAE exists
Effort_act[Effort_act>TAE] <- TAE[Effort_act>TAE]
}
Effort_act[Effort_act<=0] <- tiny
# --- Re-calculate catch given actual effort ----
# fishing mortality with actual effort
FM_P[SAYR] <- (FinF[S1] * Effort_act[S1] * V_P[SAYt] * t(Si)[SR] * fishdist[SR] *
qvar[SY1] * (qs[S1]*(1 + qinc[S1]/100)^y))/Asize[SR]
# retained fishing mortality with actual effort
FM_Pret[SAYR] <- (FinF[S1] * Effort_act[S1] * retA_P[SAYt] * t(Si)[SR] * fishdist[SR] *
qvar[SY1] * qs[S1]*(1 + qinc[S1]/100)^y)/Asize[SR]
# Apply maxF constraint
FM_P[SAYR][FM_P[SAYR] > maxF] <- maxF
FM_Pret[SAYR][FM_Pret[SAYR] > maxF] <- maxF
Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
# Update catches after maxF constraint
CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * (1-exp(-Z_P[SAYR])) * Biomass_P[SAYR]
CB_Pret[SAYR] <- FM_Pret[SAYR]/Z_P[SAYR] * (1-exp(-Z_P[SAYR])) * Biomass_P[SAYR]
# Calculate total F (using Steve Martell's approach http://api.admb-project.org/baranov_8cpp_source.html)
totalCatch <- apply(CB_P[,,y,], 1, sum)
Ftot <- sapply(1:nsim, calcF, totalCatch, V_P, Biomass_P, fishdist, Asize, maxage, nareas,
M_ageArray,nyears, y) # update if effort has changed
# Returns
out <- list()
out$TACrec <- TACused
out$V_P <- V_P
out$SLarray_P <- SLarray_P
out$retA_P <- retA_P
out$retL_P <- retL_P
out$Fdisc_P <- Fdisc_P
out$VBiomass_ <- VBiomass_P
out$Z_P <- Z_P
out$FM_P <- FM_P
out$FM_Pret <- FM_Pret
out$CB_P <- CB_P
out$CB_Pret <- CB_Pret
out$Si <- Si
out$Ai <- Ai
out$TAE <- TAE
out$Effort <- Effort_act # actual effort this year
out$Ftot <- Ftot
out
}
# if (length(MPRecs$Effort) >0 | all(Ei != 1)) { # an effort regulation also exists
# #Make sure Effort doesn't exceed regulated effort
# aboveE <- which(Effort > Ei)
# if (length(aboveE)>0) {
# Effort[aboveE] <- Ei[aboveE] * apply(fracE2, 1, sum)[aboveE]
# SAYR <- as.matrix(expand.grid(aboveE, 1:maxage, y, 1:nareas))
# SAYRt <- as.matrix(expand.grid(aboveE, 1:maxage, y + nyears, 1:nareas)) # Trajectory year
# SYt <- SAYRt[, c(1, 3)]
# SAYt <- SAYRt[, 1:3]
# SR <- SAYR[, c(1, 4)]
# S1 <- SAYR[, 1]
# SY1 <- SAYR[, c(1, 3)]
# FM_P[SAYR] <- (FinF[S1] * Ei[S1] * V_P[SAYt] * t(Si)[SR] * fishdist[SR] * qvar[SY1] *
# (qs[S1]*(1 + qinc[S1]/100)^y))/Asize[SR]
#
# # retained fishing mortality with input control recommendation
# FM_Pret[SAYR] <- (FinF[S1] * Ei[S1] * retA_P[SAYt] * t(Si)[SR] * fishdist[SR] *
# qvar[SY1] * qs[S1]*(1 + qinc[S1]/100)^y)/Asize[SR]
#
# Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
# CB_P[SAYR] <- (1-exp(-FM_P[SAYR])) * (Biomass_P[SAYR] * exp(-0.5*M_ageArray[SAYt]))
# CB_Pret[SAYR] <- (1-exp(-FM_Pret[SAYR])) * (Biomass_P[SAYR] * exp(-0.5*M_ageArray[SAYt]))
# }
# }
# CalcMPDynamics <- function(MPRecs, y, nyears, proyears, nsim,
# LastEi, LastSpatial, LastAllocat, LastCatch,
# TACused, maxF,
# LR5_P, LFR_P, Rmaxlen_P, retL_P, retA_P,
# L5_P, LFS_P, Vmaxlen_P, SLarray_P, V_P,
# Fdisc_P, DR_P,
# M_ageArray, FM_P, FM_Pret, Z_P, CB_P, CB_Pret,
# TAC_f, E_f, SizeLim_f,
# VBiomass_P, Biomass_P, FinF, Spat_targ,
# CAL_binsmid, Linf, Len_age, maxage, nareas, Asize, nCALbins,
# qs, qvar, qinc) {
# # Change in Effort
# if (length(MPRecs$Effort) == 0) { # no effort recommendation
# if (y==1) Ei <- LastEi * E_f[,y] # effort is unchanged but has implementation error
# if (y>1) Ei <- LastEi / E_f[,y-1] * E_f[,y] # effort is unchanged but has implementation error
# } else if (length(MPRecs$Effort) != nsim) {
# stop("Effort recommmendation is not 'nsim' long.\n Does MP return Effort recommendation under all conditions?")
# } else {
# Ei <- MPRecs$Effort * E_f[,y] # effort adjustment with implementation error
# }
#
# # Spatial
# if (all(is.na(MPRecs$Spatial))) { # no spatial recommendation
# Si <- LastSpatial # spatial is unchanged
# } else if (any(is.na(MPRecs$Spatial))) {
# stop("Spatial recommmendation has some NAs.\n Does MP return Spatial recommendation under all conditions?")
# } else {
# Si <- MPRecs$Spatial # change spatial fishing
# }
# if (all(dim(Si) != c(nareas, nsim))) stop("Spatial recommmendation not nareas long")
#
# # Allocation
# if (length(MPRecs$Allocate) == 0) { # no allocation recommendation
# Ai <- LastAllocat # allocation is unchanged
# } else if (length(MPRecs$Allocate) != nsim) {
# stop("Allocate recommmendation is not 'nsim' long.\n Does MP return Allocate recommendation under all conditions?")
# } else {
# Ai <- MPRecs$Allocate # change in spatial allocation
# }
# Ai <- as.numeric(Ai)
#
# # Retention Curve
# RetentFlag <- FALSE # should retention curve be updated for future years?
# # LR5
# if (length(MPRecs$LR5) == 0) { # no recommendation
# LR5_P[(y + nyears):(nyears+proyears),] <- matrix(LR5_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
#
# } else if (length(MPRecs$LR5) != nsim) {
# stop("LR5 recommmendation is not 'nsim' long.\n Does MP return LR5 recommendation under all conditions?")
# } else {
# LR5_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$LR5 * SizeLim_f[,y],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation with implementation error
# RetentFlag <- TRUE
# }
# # LFR
# if (length(MPRecs$LFR) == 0) { # no recommendation
# LFR_P[(y + nyears):(nyears+proyears),] <- matrix(LFR_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
# } else if (length(MPRecs$LFR) != nsim) {
# stop("LFR recommmendation is not 'nsim' long.\n Does MP return LFR recommendation under all conditions?")
# } else {
# LFR_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$LFR * SizeLim_f[,y],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation with implementation error
# RetentFlag <- TRUE
# }
# # Rmaxlen
# if (length(MPRecs$Rmaxlen) == 0) { # no recommendation
# Rmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(Rmaxlen_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
#
# } else if (length(MPRecs$Rmaxlen) != nsim) {
# stop("Rmaxlen recommmendation is not 'nsim' long.\n Does MP return Rmaxlen recommendation under all conditions?")
# } else {
# Rmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$Rmaxlen,
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation
# RetentFlag <- TRUE
# }
#
#
# # HS - harvest slot
# if (length(MPRecs$HS) == 0) { # no recommendation
# HS <- rep(1E5, nsim) # no harvest slot
# } else if (length(MPRecs$HS) != nsim) {
# stop("HS recommmendation is not 'nsim' long.\n Does MP return HS recommendation under all conditions?")
# } else {
# HS <- MPRecs$HS * SizeLim_f[,y] # recommendation
# RetentFlag <- TRUE
# }
#
# # Selectivity Curve
# SelectFlag <- FALSE # has selectivity been updated?
# # L5
# if (length(MPRecs$L5) == 0) { # no recommendation
# L5_P[(y + nyears):(nyears+proyears),] <- matrix(L5_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
#
# } else if (length(MPRecs$L5) != nsim) {
# stop("L5 recommmendation is not 'nsim' long.\n Does MP return L5 recommendation under all conditions?")
# } else {
# L5_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$L5 * SizeLim_f[,y],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation with implementation error
# SelectFlag <- TRUE
# }
# # LFS
# if (length(MPRecs$LFS) == 0) { # no recommendation
# LFS_P[(y + nyears):(nyears+proyears),] <- matrix(LFS_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
# } else if (length(MPRecs$LFS) != nsim) {
# stop("LFS recommmendation is not 'nsim' long.\n Does MP return LFS recommendation under all conditions?")
# } else {
# LFS_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$LFS * SizeLim_f[,y],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation with implementation error
# SelectFlag <- TRUE
# }
# # Vmaxlen
# if (length(MPRecs$Rmaxlen) == 0) { # no recommendation
# Vmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(Vmaxlen_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
#
# } else if (length(MPRecs$Rmaxlen) != nsim) {
# stop("Rmaxlen recommmendation is not 'nsim' long.\n Does MP return Rmaxlen recommendation under all conditions?")
# } else {
# Vmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$Vmaxlen,
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation
# SelectFlag <- TRUE
# }
#
# # Discard Mortality
# if (length(MPRecs$Fdisc) >0) { # Fdisc has changed
# if (length(MPRecs$Fdisc) != nsim) stop("Fdisc recommmendation is not 'nsim' long.\n Does MP return Fdisc recommendation under all conditions?")
# Fdisc_P <- MPRecs$Fdisc
# }
#
# # Discard Ratio
# if (length(MPRecs$DR)>0) { # DR has changed
# if (length(MPRecs$DR) != nsim) stop("DR recommmendation is not 'nsim' long.\n Does MP return DR recommendation under all conditions?")
# DR_P[(y+nyears):(nyears+proyears),] <- matrix(MPRecs$DR, nrow=length((y+nyears):(nyears+proyears)), ncol=nsim, byrow=TRUE)
# }
#
# # Update Selectivity and Retention Curve
# if (SelectFlag | RetentFlag) {
# yr <- y+nyears
# allyrs <- (y+nyears):(nyears+proyears) # update vulnerabilty for all future years
#
# srs <- (Linf - LFS_P[yr,]) / ((-log(Vmaxlen_P[yr,],2))^0.5) # descending limb
# srs[!is.finite(srs)] <- Inf
# sls <- (LFS_P[yr,] - L5_P[yr,]) / ((-log(0.05,2))^0.5) # ascending limb
#
# CAL_binsmidMat <- matrix(CAL_binsmid, nrow=nsim, ncol=length(CAL_binsmid), byrow=TRUE)
# selLen <- t(sapply(1:nsim, getsel, lens=CAL_binsmidMat, lfs=LFS_P[yr,], sls=sls, srs=srs))
#
# for (yy in allyrs) {
# # calculate new selectivity at age curve
# V_P[ , , yy] <- t(sapply(1:nsim, getsel, lens=Len_age[,,yy], lfs=LFS_P[yy,], sls=sls, srs=srs))
#
# # calculate new selectivity at length curve
# SLarray_P[,, yy] <- selLen
# }
#
# # sim <- 158
# # plot(CAL_binsmid, selLen[sim,], type="b")
# # lines(c(L5_P[yr,sim], L5_P[yr,sim]), c(0, 0.05), lty=2)
# # lines(c(LFS_P[yr,sim], LFS_P[yr,sim]), c(0, 1), lty=2)
# # lines(c(Linf[sim], Linf[sim]), c(0, Vmaxlen_P[yr,sim]), lty=2)
#
# # calculate new retention curve
# yr <- y+nyears
# allyrs <- (y+nyears):(nyears+proyears) # update vulnerabilty for all future years
#
# srs <- (Linf - LFR_P[yr,]) / ((-log(Rmaxlen_P[yr,],2))^0.5) # selectivity parameters are constant for all years
# srs[!is.finite(srs)] <- Inf
# sls <- (LFR_P[yr,] - LR5_P[yr,]) / ((-log(0.05,2))^0.5)
#
# CAL_binsmidMat <- matrix(CAL_binsmid, nrow=nsim, ncol=length(CAL_binsmid), byrow=TRUE)
# relLen <- t(sapply(1:nsim, getsel, lens=CAL_binsmidMat, lfs=LFR_P[yr,], sls=sls, srs=srs))
#
# for (yy in allyrs) {
# # calculate new retention at age curve
# retA_P[ , , yy] <- t(sapply(1:nsim, getsel, lens=Len_age[,,yy], lfs=LFR_P[yy,], sls=sls, srs=srs))
#
# # calculate new retention at length curve
# retL_P[,, yy] <- relLen
# }
#
# # upper harvest slot
# aboveHS <- Len_age[,,allyrs, drop=FALSE]>array(HS, dim=c(nsim, maxage, length(allyrs)))
# tretA_P <- retA_P[,,allyrs]
# tretA_P[aboveHS] <- 0
# retA_P[,,allyrs] <- tretA_P
# for (ss in 1:nsim) {
# index <- which(CAL_binsmid >= HS[ss])
# retL_P[ss, index, allyrs] <- 0
# }
#
# dr <- aperm(abind::abind(rep(list(DR_P), maxage), along=3), c(2,3,1))
# retA_P[,,allyrs] <- (1-dr[,,yr]) * retA_P[,,yr]
# dr <- aperm(abind::abind(rep(list(DR_P), nCALbins), along=3), c(2,3,1))
# retL_P[,,allyrs] <- (1-dr[,,yr]) * retL_P[,,yr]
#
# # update realized vulnerablity curve with retention and dead discarded fish
# Fdisc_array1 <- array(Fdisc_P, dim=c(nsim, maxage, length(allyrs)))
#
# V_P[,,allyrs] <- V_P[,,allyrs, drop=FALSE] * (retA_P[,,allyrs, drop=FALSE] + (1-retA_P[,,allyrs, drop=FALSE])*Fdisc_array1)
#
# Fdisc_array2 <- array(Fdisc_P, dim=c(nsim, nCALbins, length(allyrs)))
# SLarray_P[,,allyrs] <- SLarray_P[,,allyrs, drop=FALSE] * (retL_P[,,allyrs, drop=FALSE]+ (1-retL_P[,,allyrs, drop=FALSE])*Fdisc_array2)
#
# # Realised Retention curves
# retA_P[,,allyrs] <- retA_P[,,allyrs] * V_P[,,allyrs]
# retL_P[,,allyrs] <- retL_P[,,allyrs] * SLarray_P[,,allyrs]
# }
#
# # indices
# SAYRL <- as.matrix(expand.grid(1:nsim, 1:maxage, nyears, 1:nareas)) # Final historical year
# SAYRt <- as.matrix(expand.grid(1:nsim, 1:maxage, y + nyears, 1:nareas)) # Trajectory year
# SAYR <- as.matrix(expand.grid(1:nsim, 1:maxage, y, 1:nareas))
# SYt <- SAYRt[, c(1, 3)]
# SAYt <- SAYRt[, 1:3]
# SR <- SAYR[, c(1, 4)]
# SA1 <- SAYR[, 1:2]
# S1 <- SAYR[, 1]
# SY1 <- SAYR[, c(1, 3)]
# SAY1 <- SAYR[, 1:3]
# SYA <- as.matrix(expand.grid(1:nsim, 1, 1:maxage)) # Projection year
# SY <- SYA[, 1:2]
# SA <- SYA[, c(1, 3)]
# SAY <- SYA[, c(1, 3, 2)]
# S <- SYA[, 1]
#
# # update vulnerable biomass for selectivitity curve
# VBiomass_P[SAYR] <- Biomass_P[SAYR] * V_P[SAYt] # update vulnerable biomass
#
# # Calculate fishing distribution if all areas were open
# newVB <- apply(VBiomass_P[,,y,], c(1,3), sum) # calculate total vuln biomass by area
# fishdist <- (newVB^Spat_targ)/apply(newVB^Spat_targ, 1, sum) # spatial preference according to spatial vulnerable biomass
#
# d1 <- t(Si) * fishdist # distribution of fishing effort
# fracE <- apply(d1, 1, sum) # fraction of current effort in open areas
# fracE2 <- d1 * (fracE + (1-fracE) * Ai)/fracE # re-distribution of fishing effort
#
# fishdist <- fracE2 # fishing effort by area
#
# # Apply TAC recommendation
# if (all(is.na(TACused))) { # no TAC has been set
#
# # fishing mortality with effort control recommendation
# FM_P[SAYR] <- (FinF[S1] * Ei[S1] * V_P[SAYt] * t(Si)[SR] * fishdist[SR] *
# qvar[SY1] * (qs[S1]*(1 + qinc[S1]/100)^y))/Asize[SR]
#
# # retained fishing mortality with effort control recommendation
# FM_Pret[SAYR] <- (FinF[S1] * Ei[S1] * retA_P[SAYt] * t(Si)[SR] * fishdist[SR] *
# qvar[SY1] * qs[S1]*(1 + qinc[S1]/100)^y)/Asize[SR]
#
# Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
#
# CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
# CB_Pret[SAYR] <- FM_Pret[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
#
# Effort <- FinF *Ei * apply(fracE2, 1, sum) # (Fs/qs)/ FinF # Ei # (Fs/qs)/ FinF change in catchability not included in effort calc: * qvar[,y] * ((1 + qinc/100)^y))
#
# } else { # A TAC has been set
#
#
# TACused[is.na(TACused)] <- LastCatch[is.na(TACused)] # if MP returns NA - TAC is set to catch from last year
# TACrec <- TACused # TAC recommendation
# TACusedE<- TAC_f[,y]*TACused # TAC taken after implementation error
#
# availB <- apply(newVB * t(Si), 1, sum)
#
# # maxC <- (1 - exp(-maxF)) * availB # maximum catch given maxF
# # TACusedE[TACusedE > maxC] <- maxC[TACusedE > maxC] # apply maxF limit - catch can't be higher than maxF * vulnerable biomass
#
# CB_P[SAYR] <- (Biomass_P[SAYR] * V_P[SAYt] * fishdist[SR])/Asize[SR] # ignore magnitude of effort or q increase (just get distribution across age and fishdist across space
# # calculate distribution of retained effort
# CB_Pret[SAYR] <- (Biomass_P[SAYR] * retA_P[SAYt] * fishdist[SR])/Asize[SR] # ignore magnitude of effort or q increase (just get distribution across age and fishdist across space
#
# retained <- apply(CB_Pret[,,y,], 1, sum)
# actualremovals <- apply(CB_P[,,y,], 1, sum)
# ratio <- actualremovals/retained # ratio of actual removals to retained catch
# ratio[!is.finite(ratio)] <- 0
# ratio[ratio>1E5] <- 1E5
# temp <- CB_Pret[, , y, ]/apply(CB_Pret[, , y, ], 1, sum) # distribution of retained fish
# CB_Pret[, , y, ] <- TACusedE * temp # retained catch
#
# temp <- CB_P[, , y, ]/apply(CB_P[, , y, ], 1, sum) # distribution of removals
#
# CB_P[,,y,] <- TACusedE * ratio * temp # scale up total removals
#
# chk <- apply(CB_P[,,y,], 1, sum) > availB # total removals can't be more than available biomass
# if (sum(chk)>0) {
# c_temp <- apply(CB_P[chk,,y,, drop=FALSE], 1, sum)
# ratio_temp <- (availB[chk]/c_temp) * 0.99
# if (sum(chk)>1) CB_P[chk,,y, ] <- CB_P[chk,,y,] * array(ratio_temp, dim=c(sum(chk), maxage, nareas))
# if (sum(chk)==1) CB_P[chk,,y, ] <- CB_P[chk,,y,] * array(ratio_temp, dim=c(maxage, nareas))
# }
#
# # temp <- CB_P[SAYR]/(Biomass_P[SAYR] * exp(-M_ageArray[SAYt]/2)) # Pope's approximation
# # temp[temp > (1 - exp(-maxF))] <- 1 - exp(-maxF) # apply maxF constraint
# # FM_P[SAYR] <- -log(1 - temp)
#
# # Calculate F by age class and area & apply maxF constraint
# temp1 <- sapply(1:nsim, function(sim)
# CalculateF(CB_P[sim,,y,], M_ageArray[sim,,y], V_P[sim,,y], Biomass_P[sim,,y,], maxF=maxF, byage=TRUE))
#
# temp <- as.vector(aperm(temp1, c(2,1)))
#
# FM_P[SAYR] <- temp
# Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
# # update removals with maxF constraint
# CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
#
# # t2 <- apply(CB_P[,,y,],1, sum)
#
# Fs <- sapply(1:nsim, function(sim)
# CalculateF(Catch_age_area=CB_P[sim,,y,], M_at_Age=M_ageArray[sim,,y],
# Vuln_age=V_P[sim,,y], B_age_area=Biomass_P[sim,,y,],
# maxF=maxF, byage=FALSE))
# Fs/FMSY
# apply(CB_P[,,y,], 1, sum)
# TACused
#
#
# Data@OM$A[x] * (1-exp(-Fs[x]))
# Data@OM$A[x] * (1-exp(-FMSY[x]))
#
#
# # repeated because of approximation error in Pope's approximation - an issue if CB_P ~ AvailB
# # chk <- apply(CB_P[,,y,], 1, sum) > availB # total removals can't be more than available biomass
# #
# # if (sum(chk)>0) {
# # c_temp <- apply(CB_P[chk,,y,, drop=FALSE], 1, sum)
# # ratio_temp <- (availB[chk]/c_temp) * 0.99
# # if (sum(chk)>1) CB_P[chk,,y, ] <- CB_P[chk,,y,] * array(ratio_temp, dim=c(sum(chk), maxage, nareas))
# # if (sum(chk)==1) CB_P[chk,,y, ] <- CB_P[chk,,y,] * array(ratio_temp, dim=c(maxage, nareas))
# # }
#
# # retained catch
# # temp <- CB_Pret[SAYR]/(Biomass_P[SAYR] * exp(-M_ageArray[SAYt]/2)) # Pope's approximation
# # temp[temp > (1 - exp(-maxF))] <- 1 - exp(-maxF) # apply maxF constraint
# # FM_Pret[SAYR] <- -log(1 - temp)
#
# # retained catch with maxF constraint
# temp1 <- sapply(1:nsim, function(sim)
# CalculateF(CB_Pret[sim,,y,], M_ageArray[sim,,y], V_P[sim,,y], Biomass_P[sim,,y,], maxF=maxF, byage=TRUE))
# temp <- as.vector(aperm(temp1, c(2,1)))
# FM_Pret[SAYR] <- temp
# # update retained catch
# CB_Pret[SAYR] <- FM_Pret[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
#
# # M_age_area <- array(M_ageArray[,,y], dim=c(nsim, maxage, nareas))
#
#
#
#
#
#
# # Fs <- suppressWarnings(-log(1 - apply(CB_P[, , y, ], 1, sum)/apply(VBiomass_P[, , y, ]*exp(-(0.5*M_age_area)), 1, sum))) # Pope's approx
# # Fs[!is.finite(Fs)] <- 2 # NaN for very high Fs
#
# Effort <- Fs/(FinF * qs*qvar[,y]* (1 + qinc/100)^y) * apply(fracE2, 1, sum)
#
# # Make sure Effort doesn't exceed regulated effort
# if (length(MPRecs$Effort) >0 | all(LastEi != 1)) { # an effort regulation also exists
# aboveE <- which(Effort > Ei)
# if (length(aboveE)>0) {
# Effort[aboveE] <- Ei[aboveE] * FinF[aboveE] * apply(fracE2, 1, sum)[aboveE]
# SAYR <- as.matrix(expand.grid(aboveE, 1:maxage, y, 1:nareas))
# SAYRt <- as.matrix(expand.grid(aboveE, 1:maxage, y + nyears, 1:nareas)) # Trajectory year
# SYt <- SAYRt[, c(1, 3)]
# SAYt <- SAYRt[, 1:3]
# SR <- SAYR[, c(1, 4)]
# S1 <- SAYR[, 1]
# SY1 <- SAYR[, c(1, 3)]
# FM_P[SAYR] <- (FinF[S1] * Ei[S1] * V_P[SAYt] * t(Si)[SR] * fishdist[SR] * qvar[SY1] *
# (qs[S1]*(1 + qinc[S1]/100)^y))/Asize[SR]
#
# # retained fishing mortality with input control recommendation
# FM_Pret[SAYR] <- (FinF[S1] * Ei[S1] * retA_P[SAYt] * t(Si)[SR] * fishdist[SR] *
# qvar[SY1] * qs[S1]*(1 + qinc[S1]/100)^y)/Asize[SR]
#
# Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
#
# CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
# CB_Pret[SAYR] <- FM_Pret[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
# }
#
# }
#
# }
#
# # Returns
# out <- list()
# out$TACrec <- TACused
# out$V_P <- V_P
# out$SLarray_P <- SLarray_P
# out$retA_P <- retA_P
# out$retL_P <- retL_P
# out$Fdisc_P <- Fdisc_P
# out$VBiomass_ <- VBiomass_P
# out$Z_P <- Z_P
# out$FM_P <- FM_P
# out$FM_Pret <- FM_Pret
# out$CB_P <- CB_P
# out$CB_Pret <- CB_Pret
# out$Si <- Si
# out$Ai <- Ai
# out$Ei <- Ei
# out$Effort <- Effort
# out
# }
#
# getMSY <- function(x, MatAge, LenAge, WtAge, MatureAge, VAge, maxage, R0, SRrel, hs) {
#
# opt <- optimize(MSYCalcs, log(c(0.001, 10)), MatAge=MatAge[x,], LenAge=LenAge[x,],
# WtAge=WtAge[x,], MatureAge=MatureAge[x,],
# VAge=VAge[x,], maxage=maxage, R0=R0[x], SRrel=SRrel[x], hs=hs[x], opt=1)
#
#
# runMod <- MSYCalcs(logapicF=opt$minimum, MatAge=MatAge[x,], LenAge=LenAge[x,],
# WtAge=WtAge[x,], MatureAge=MatureAge[x,],
# VAge=VAge[x,], maxage=maxage, R0=R0[x], SRrel=SRrel[x], hs=hs[x], opt=2)
#
# runMod
# }
#
# MSYCalcs <- function(logapicF, M_at_Age, WtAge, MatureAge, VAge, maxage, R0, SRrel, hs, opt=1) {
# # Box 3.1 Walters & Martell 2004
# U <- exp(logU)
# lx <- l0 <- rep(1, maxage)
# for (a in 2:maxage) {
# l0[a] <- l0[a-1] * exp(-M_at_Age[a-1])
# lx[a] <- lx[a-1] * exp(-M_at_Age[a-1]) * (1-U*VAge[a-1])
# }
# Egg0 <- sum(l0 * WtAge * MatureAge) # unfished egg production (assuming fecundity proportional to weight)
# EggF <- sum(lx * WtAge * MatureAge) # fished egg production (assuming fecundity proportional to weight)
#
# vB0 <- sum(l0 * WtAge * VAge)
# vBF <- sum(lx * WtAge * VAge)
#
# SB0 <- sum(l0 * WtAge * MatureAge) # same as eggs atm
# SBF <- sum(lx * WtAge * MatureAge)
#
# B0 <- sum(l0 * WtAge)
# BF <- sum(lx * WtAge)
#
# hs[hs>0.999] <- 0.999
# recK <- (4*hs)/(1-hs) # Goodyear compensation ratio
# reca <- recK/Egg0
# if (SRrel ==1) {
# recb <- (reca * Egg0 - 1)/(R0*Egg0) # BH SRR
# RelRec <- (reca * EggF-1)/(recb*EggF)
# }
# if (SRrel ==2) {
# bR <- (log(5*hs)/(0.8*SB0))
# aR <- exp(bR*SB0)/(SB0/R0)
# RelRec <- (log(aR*EggF/R0))/(bR*EggF/R0)
# }
#
# RelRec[RelRec<0] <- 0
#
# Fa <- apicF*VAge
# Za <- Fa + M_at_Age
# relyield <- Fa/Za * lx * (1-exp(-Za)) * WtAge
# YPR <- sum(relyield)
# Yield <- YPR * RelRec
#
# if (opt == 1) return(-Yield)
# if (opt == 2) {
# out <- c(Yield=Yield,
# F= CalculateF(relyield * RelRec, M_at_Age, VAge, lx * WtAge * RelRec),
# SB = SBF * RelRec,
# SB_SB0 = (SBF * RelRec)/(SB0 * R0),
# B_B0 = (BF * RelRec)/(B0 * R0),
# B = BF * RelRec,
# VB = vBF * RelRec,
# VB_VB0 = (vBF * RelRec)/(vB0 * R0),
# RelRec=RelRec,
# SB0 = SB0 * R0,
# B0=B0 * R0,
# apicF=apicF)
#
# return(out)
# }
# }
calcF <- function(x, TACusedE, V_P, Biomass_P, fishdist, Asize, maxage, nareas,
                  M_ageArray, nyears, y) {
  # Solve the Baranov catch equation for the apical fishing mortality (F) that
  # removes the catch `TACusedE[x]` in projection year `y` for simulation `x`.
  # Uses Newton-Raphson iteration following Steve Martell's approach
  # (http://api.admb-project.org/baranov_8cpp_source.html).
  #
  # Args:
  #   x          - simulation index
  #   TACusedE   - vector of target (post-implementation-error) catches by sim
  #   V_P        - selectivity-at-age array [sim, age, year]
  #   Biomass_P  - projected biomass array [sim, age, proj. year, area]
  #   fishdist   - matrix [sim, area] of spatial distribution of fishing effort
  #   Asize      - matrix [sim, area] of relative area sizes
  #   maxage     - number of age classes
  #   nareas     - number of areas
  #   M_ageArray - natural mortality-at-age array [sim, age, year]
  #   nyears     - number of historical years (projection year y maps to y+nyears)
  #   y          - projection year index
  #
  # Returns: scalar apical F for simulation `x`.
  target_catch <- TACusedE[x]
  # Starting value: exploitation rate of vulnerable biomass as a crude F guess
  f_est <- target_catch/sum(Biomass_P[x,,y,] * V_P[x,,y+nyears])
  iter <- 0
  while (iter < 50) { # cap iterations in case of non-convergence
    iter <- iter + 1
    # Distribute apical F over ages (selectivity) and areas (effort/area size)
    F_age_area <- f_est * matrix(V_P[x,,y+nyears], nrow=maxage, ncol=nareas) *
      matrix(fishdist[x,], maxage, nareas, byrow=TRUE)/
      matrix(Asize[x,], maxage, nareas, byrow=TRUE)
    Z_age_area <- F_age_area + matrix(M_ageArray[x,,y+nyears], nrow=maxage,
                                      ncol=nareas, byrow=FALSE)
    # Predicted total catch under the Baranov equation
    pred_catch <- sum(F_age_area/Z_age_area * (1-exp(-Z_age_area)) * Biomass_P[x,,y,])
    dead_B <- (1-exp(-Z_age_area)) * Biomass_P[x,,y,] # total removals (all causes)
    # Approximate derivative of predicted catch with respect to F
    grad <- sum(dead_B/Z_age_area - ((F_age_area * dead_B)/Z_age_area^2) +
                  F_age_area/Z_age_area * exp(-Z_age_area) * Biomass_P[x,,y,])
    f_est <- f_est - (pred_catch - target_catch)/grad # Newton step
    if (abs(pred_catch - target_catch) < 1E-6) break
  }
  f_est
}
#' Internal wrapper function to calculate MSY reference points
#'
#' @param x Simulation number
#' @param M_ageArray Array of M-at-age
#' @param Wt_age Array of weight-at-age
#' @param Mat_age Array of maturity-at-age
#' @param V Array of selectivity-at-age
#' @param maxage Vector of maximum age
#' @param R0 Vector of R0s
#' @param SRrel SRR type
#' @param hs Vector of steepness
#' @param yr.ind Year index used in calculations
#' @param plusgroup Integer. Default = 0 = no plus-group. Use 1 to include a plus-group
#' @return Results from `MSYCalcs`
#' @export
#'
#' @keywords internal
optMSY_eq <- function(x, M_ageArray, Wt_age, Mat_age, V, maxage, R0, SRrel, hs,
                      yr.ind=1, plusgroup=0) {
  # Wrapper that calculates equilibrium MSY reference points for simulation `x`
  # by optimising `MSYCalcs` over log fishing mortality.
  #
  # If more than one year index is supplied in `yr.ind`, the at-age schedules
  # are averaged (by age class) over those years before the optimisation.

  # Extract (and, if needed, year-average) the at-age vector for one array
  at_age <- function(arr) {
    if (length(yr.ind) == 1) {
      arr[x, , yr.ind]
    } else {
      apply(arr[x, , yr.ind], 1, mean)
    }
  }
  M_at_Age   <- at_age(M_ageArray)
  Wt_at_Age  <- at_age(Wt_age)
  Mat_at_Age <- at_age(Mat_age)
  V_at_Age   <- at_age(V)

  boundsF <- c(1E-8, 3) # search bounds for F (optimised on log scale)
  doopt <- optimise(MSYCalcs, log(boundsF), M_at_Age, Wt_at_Age, Mat_at_Age,
                    V_at_Age, maxage, R0x=R0[x], SRrelx=SRrel[x], hx=hs[x], opt=1,
                    plusgroup=plusgroup)
  # Re-run MSYCalcs at the optimum to return the full set of reference points
  MSYCalcs(doopt$minimum, M_at_Age, Wt_at_Age, Mat_at_Age,
           V_at_Age, maxage, R0x=R0[x], SRrelx=SRrel[x], hx=hs[x], opt=2,
           plusgroup=plusgroup)
}
#' Internal function to calculate MSY Reference Points
#'
#' @param logF log fishing mortality
#' @param M_at_Age Vector of M-at-age
#' @param Wt_at_Age Vector of weight-at-age
#' @param Mat_at_Age Vector of maturity-at-age
#' @param V_at_Age Vector of selectivity-at-age
#' @param maxage Maximum age
#' @param R0x R0 for this simulation
#' @param SRrelx SRR type for this simulation
#' @param hx numeric. Steepness value for this simulation
#' @param opt Option. 1 = return -Yield, 2= return all MSY calcs
#' @param plusgroup Integer. Default = 0 = no plus-group. Use 1 to include a plus-group
#' @return See `opt`
#' @export
#'
#' @keywords internal
MSYCalcs <- function(logF, M_at_Age, Wt_at_Age, Mat_at_Age, V_at_Age,
                     maxage, R0x, SRrelx, hx, opt=1, plusgroup=0) {
  # Equilibrium per-recruit calculations - Box 3.1 Walters & Martell 2004
  FF <- exp(logF)

  # Survivorship-at-age: unfished (l0) and fished (lx)
  l0 <- c(1, exp(cumsum(-M_at_Age[1:(maxage-1)]))) # unfished survival
  surv <- exp(-M_at_Age - FF * V_at_Age)
  lx <- cumprod(c(1, surv[seq_len(maxage - 1)]))   # fished survival

  if (plusgroup == 1) {
    # Last age class is a plus-group: divide by (1 - survival) so it
    # accumulates survivors indefinitely
    l0[maxage] <- l0[maxage]/(1 - exp(-M_at_Age[maxage]))
    lx[maxage] <- lx[maxage]/(1 - surv[maxage])
  }

  # Per-recruit quantities (fecundity assumed proportional to weight)
  Egg0 <- sum(l0 * Wt_at_Age * Mat_at_Age) # unfished egg-per-recruit
  EggF <- sum(lx * Wt_at_Age * Mat_at_Age) # fished egg-per-recruit
  vB0 <- sum(l0 * Wt_at_Age * V_at_Age)    # unfished vulnerable biomass per-recruit
  vBF <- sum(lx * Wt_at_Age * V_at_Age)    # fished vulnerable biomass per-recruit
  SB0 <- Egg0  # spawning biomass per-recruit - same as eggs at the moment
  SBF <- EggF
  B0 <- sum(l0 * Wt_at_Age)                # total biomass per-recruit
  BF <- sum(lx * Wt_at_Age)

  # Cap steepness just below 1 to avoid division by zero
  hx[hx > 0.999] <- 0.999
  recK <- (4*hx)/(1-hx) # Goodyear compensation ratio
  reca <- recK/Egg0
  SPR <- EggF/Egg0      # spawning potential ratio (computed but not returned)

  # Equilibrium recruitment at this fished egg-per-recruit
  if (SRrelx == 1) { # Beverton-Holt SRR
    recb <- (reca * Egg0 - 1)/(R0x*Egg0)
    RelRec <- (reca * EggF - 1)/(recb*EggF)
  }
  if (SRrelx == 2) { # Ricker SRR
    bR <- (log(5*hx)/(0.8*SB0))
    aR <- exp(bR*SB0)/(SB0/R0x)
    RelRec <- (log(aR*EggF/R0x))/(bR*EggF/R0x)
  }
  RelRec[RelRec < 0] <- 0 # no negative equilibrium recruitment

  # Yield-per-recruit via the Baranov catch equation
  Z_at_Age <- FF * V_at_Age + M_at_Age
  YPR <- sum(lx * Wt_at_Age * FF * V_at_Age * (1 - exp(-Z_at_Age))/Z_at_Age)
  Yield <- YPR * RelRec

  if (opt == 1) return(-Yield) # minimisation target for the FMSY search
  if (opt == 2) {
    # Full suite of equilibrium reference points
    c(Yield = Yield,
      F = FF,
      SB = SBF * RelRec,
      SB_SB0 = (SBF * RelRec)/(SB0 * R0x),
      B_B0 = (BF * RelRec)/(B0 * R0x),
      B = BF * RelRec,
      VB = vBF * RelRec,
      VB_VB0 = (vBF * RelRec)/(vB0 * R0x),
      RelRec = RelRec,
      SB0 = SB0 * R0x,
      B0 = B0 * R0x)
  }
}
# optMSY_eq <- function(x, M_ageArray, Wt_age, Mat_age, V, maxage, R0, SRrel, hs, yr=1) {
# boundsU <- c(0.0000001, 1)
#
# doopt <- optimise(MSYCalcs, log(boundsU), M_at_Age=M_ageArray[x,,yr], WtAge=Wt_age[x,,yr],
# MatureAge=Mat_age[x,,yr], VAge=V[x,,yr], maxage, R0=R0[x], SRrel=SRrel[x], hs=hs[x], opt=1)
#
# apicFMSY <- exp(doopt$minimum)
# apicFMSY2 <- apicFMSY
#
# MSYs <- MSYCalcs(log(apicFMSY), M_at_Age=M_ageArray[x,,yr], WtAge=Wt_age[x,,yr],
# MatureAge=Mat_age[x,,yr], VAge=V[x,,yr], maxage, R0=R0[x], SRrel=SRrel[x], hs=hs[x], opt=2)
#
# if (MSYs[1] < 1) {
# count <- 0; stop <- FALSE
# while (apicFMSY > 0.95 * max(bounds) & count < 50 & !stop) {
# count <- count + 1
# bounds <- c(0.0000001, max(bounds)-0.1)
# if (bounds[1] < bounds[2]) {
# doopt <- optimise(MSYCalcs, log(bounds), M_at_Age=M_ageArray[x,,yr], WtAge=Wt_age[x,,yr],
# MatureAge=Mat_age[x,,yr], VAge=V[x,,yr], maxage, R0=R0[x], SRrel=SRrel[x], hs=hs[x], opt=1)
# apicFMSY <- exp(doopt$minimum)
# } else {
# stop <- TRUE
# }
# }
# if (count >=50 | stop) apicFMSY <- apicFMSY2
# MSYs <- MSYCalcs(log(apicFMSY), M_at_Age=M_ageArray[x,,yr], WtAge=Wt_age[x,,yr],
# MatureAge=Mat_age[x,,yr], VAge=V[x,,yr], maxage, R0=R0[x], SRrel=SRrel[x], hs=hs[x], opt=2)
# }
# return(MSYs)
#
# }
# Split an array into a named list of sub-arrays along dimension n.
# Each list element is the slice of `a` at one index of dimension n, with
# the remaining dimensions (and their dimnames) preserved; the list is
# named by dimnames(a)[[n]].
split.along.dim <- function(a, n) {
  # For each element of a (column-major order), the index along dimension n
  sliceIdx <- arrayInd(seq_along(a), dim(a))[, n]
  # Group the flat data by slice and restore the remaining dimensions
  slices <- lapply(split(a, sliceIdx), function(vals) {
    array(vals, dim = dim(a)[-n], dimnames = dimnames(a)[-n])
  })
  stats::setNames(slices, dimnames(a)[[n]])
}
#' optimize for catchability (q)
#'
#' Function optimizes catchability (q, where F=qE) required to get to user-specified stock
#' depletion
#'
#' @param x Integer, the simulation number
#' @param D A numeric vector nsim long of sampled depletion
#' @param SSB0 A numeric vector nsim long of total unfished spawning biomass
#' @param nareas The number of spatial areas
#' @param maxage The maximum age
#' @param N Array of the numbers-at-age in population. Dimensions are nsim, maxage, nyears, nareas.
#' Only values from the first year (i.e `N[,,1,]`) are used, which is the current N-at-age.
#' @param pyears The number of years to project forward. Equal to 'nyears' for optimizing for q.
#' @param M_ageArray An array (dimensions nsim, maxage, nyears+proyears) with the natural mortality-at-age and year
#' @param Mat_age An array (dimensions nsim, maxage, proyears+nyears) with the proportion mature for each age-class
#' @param Asize A matrix (dimensions nsim, nareas) with size of each area
#' @param Wt_age An array (dimensions nsim, maxage, nyears+proyears) with the weight-at-age and year
#' @param V An array (dimensions nsim, maxage, nyears+proyears) with the vulnerability-at-age and year
#' @param retA An array (dimensions nsim, maxage, nyears+proyears) with the probability retained-at-age and year
#' @param Perr A matrix (dimensions nsim, nyears+proyears) with the recruitment deviations
#' @param mov An array (dimensions nsim, nareas, nareas, nyears+proyears) with the movement matrix
#' @param SRrel A numeric vector nsim long specifying the recruitment curve to use
#' @param Find A matrix (dimensions nsim, nyears) with the historical fishing effort
#' @param Spat_targ A numeric vector nsim long with the spatial targeting
#' @param hs A numeric vector nsim long with the steepness values for each simulation
#' @param R0a A matrix (dimensions nsim, nareas) with the unfished recruitment by area
#' @param SSBpR A matrix (dimensions nsim, nareas) with the unfished spawning-per-recruit by area
#' @param aR A numeric vector nareas long with the Ricker SRR a values
#' @param bR A numeric vector nareas long with the Ricker SRR b values
#' @param bounds A numeric vector of length 2 with bounds for the optimizer
#' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
#' @param MPA A matrix of spatial closures by year
#' @param plusgroup Integer. Default = 0 = no plus-group. Use 1 to include a plus-group
#' @param VB0 numeric vector nsim long of total unfished vulnerable biomass
#' @param optVB Logical. Optimize for vulnerable biomass?
#' @author A. Hordyk
#' @keywords internal
getq3 <- function(x, D, SSB0, nareas, maxage, N, pyears, M_ageArray, Mat_age, Asize, Wt_age,
                  V, retA, Perr, mov, SRrel, Find, Spat_targ, hs, R0a, SSBpR, aR, bR,
                  bounds = c(1e-05, 15), maxF, MPA, plusgroup, VB0, optVB) {
  # Search over log(q) for the catchability that reproduces the sampled
  # depletion D[x] at the end of the historical period (optQ returns the
  # squared log-difference between target and predicted depletion).
  # NOTE: the unnamed arguments here are matched positionally into optQ
  # through optimize's `...` - do not reorder them.
  opt <- optimize(optQ, log(bounds), depc=D[x], SSB0c=SSB0[x], nareas, maxage, Ncurr=N[x,,1,],
                  pyears, M_age=M_ageArray[x,,], MatAge=Mat_age[x,,], Asize_c=Asize[x,], WtAge=Wt_age[x,,],
                  Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,], movc=split.along.dim(mov[x,,,,],4),
                  SRrelc=SRrel[x],
                  Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
                  SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], maxF=maxF, MPA=MPA,
                  plusgroup=plusgroup, VB0[x], optVB)
  # The search is in log space; back-transform before returning
  return(exp(opt$minimum))
}
#' Optimize q for a single simulation
#'
#' @param logQ log q
#' @param depc Depletion value
#' @param SSB0c Unfished spawning biomass
#' @param nareas Number of areas
#' @param maxage Maximum age
#' @param Ncurr Current N-at-age
#' @param pyears Number of years to project population dynamics
#' @param M_age M-at-age
#' @param Asize_c Numeric vector (length nareas) with size of each area
#' @param MatAge Maturity-at-age
#' @param WtAge Weight-at-age
#' @param Vuln Vulnerability-at-age
#' @param Retc Retention-at-age
#' @param Prec Recruitment error by year
#' @param movc movement matrix
#' @param SRrelc SR parameter
#' @param Effind Historical fishing effort
#' @param Spat_targc Spatial targeting
#' @param hc Steepness
#' @param R0c Unfished recruitment by area
#' @param SSBpRc Unfished spawning biomass per recruit by area
#' @param aRc Ricker aR
#' @param bRc Ricker bR
#' @param maxF maximum F
#' @param MPA A matrix of spatial closures by year
#' @param plusgroup Integer. Default = 0 = no plus-group. Use 1 to include a plus-group
#' @param VB0c Unfished vulnerable biomass
#' @param optVB Logical. Optimize for vulnerable biomass?
#' @author A. Hordyk
#' @keywords internal
optQ <- function(logQ, depc, SSB0c, nareas, maxage, Ncurr, pyears, M_age, Asize_c,
                 MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind, Spat_targc, hc,
                 R0c, SSBpRc, aRc, bRc, maxF, MPA, plusgroup, VB0c, optVB) {
  # Project the population over the historical period with catchability
  # q = exp(logQ) (control=1: F computed from q and effort)
  pop <- popdynCPP(nareas, maxage, Ncurr, pyears, M_age, Asize_c,
                   MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind, Spat_targc, hc,
                   R0c=R0c, SSBpRc=SSBpRc, aRc=aRc, bRc=bRc, Qc=exp(logQ), Fapic=0,
                   maxF=maxF, MPA=MPA, control=1, SSB0c=SSB0c,
                   plusgroup=plusgroup)
  # Terminal-year totals: spawning biomass and vulnerable biomass
  finalSSB <- sum(pop[[4]][, pyears, ])
  finalVB <- sum(pop[[5]][, pyears, ])
  # Predicted depletion relative to the chosen unfished reference
  predDep <- if (optVB) finalVB/VB0c else finalSSB/SSB0c
  # Objective: squared log-difference from the target depletion
  (log(depc) - log(predDep))^2
}
# #' Population dynamics model
# #'
# #' @param nareas Integer. The number of spatial areas
# #' @param maxage Integer. The maximum age
# #' @param Ncurr Numeric matrix (dimensions maxage, nareas) with the current N-at-age
# #' @param pyears Integer. Number of years to project the model forward
# #' @param M_age Numeric matrix (dimensions maxage, pyears) with natural mortality at age
# #' @param Asize_c Numeric vector (length nareas) with size of each area
# #' @param MatAge Numeric matrix (dimensions maxage, nyears+proyears) with proportion mature for each age-class
# #' @param WtAge Numeric matrix (dimensions maxage, pyears) with weight-at-age
# #' @param Vuln Numeric matrix (dimensions maxage, pyears) with proportion vulnerable-at-age
# #' @param Retc Numeric matrix (dimensions maxage, pyears) with proportion retained-at-age
# #' @param Prec Numeric vector (length pyears) with recruitment error
# #' @param movc Numeric matrix (dimensions nareas, nareas) with movement matrix
# #' @param SRrelc Integer. Stock-recruitment curve
# #' @param Effind Numeric vector (length pyears) with the fishing effort by year
# #' @param Spat_targc Integer. Value of spatial targetting
# #' @param hc Numeric. Steepness of stock-recruit relationship
# #' @param R0c Numeric vector of length nareas with unfished recruitment by area
# #' @param SSBpRc Numeric vector of length nareas with unfished spawning per recruit by area
# #' @param aRc Numeric. Ricker SRR a value
# #' @param bRc Numeric. Ricker SRR b value
# #' @param Qc Numeric. Catchability coefficient
# #' @param Fapic Numeric. Apical F value
# #' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
# #' @param MPA A matrix of spatial closures by year
# #' @param control Integer. 1 to use q and effort to calculate F, 2 to use Fapic (apical F) and
# #' vulnerablity to calculate F.
# #'
# #' @author A. Hordyk
# #'
# #' @return A named list of length 8 containing with arrays (dimensions: maxage, pyears, nareas)
# #' containing numbers-at-age, biomass-at-age, spawning stock numbers, spawning biomass,
# #' vulnerable biomass, fishing mortality, retained fishing mortality, and total mortality
# # #' @export
# #'
# popdyn <- function(nareas, maxage, Ncurr, pyears, M_age, Asize_c,
# MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind, Spat_targc, hc,
# R0c, SSBpRc, aRc, bRc, Qc, Fapic=NULL, maxF, MPA, control=1) {
# Narray <- array(NA, dim=c(maxage, pyears, nareas))
# Barray <- array(NA, dim=c(maxage, pyears, nareas))
# SSNarray <- array(NA, dim=c(maxage, pyears, nareas))
# SBarray <- array(NA, dim=c(maxage, pyears, nareas))
# VBarray <- array(NA, dim=c(maxage, pyears, nareas))
# Marray <- array(NA, dim=c(maxage, pyears, nareas))
# FMarray <- array(NA, dim=c(maxage, pyears, nareas))
# FMretarray <- array(NA, dim=c(maxage, pyears, nareas))
# Zarray <- array(NA, dim=c(maxage, pyears, nareas))
#
# Narray[,1,] <- Ncurr
# Barray[,1,] <- Narray[,1,] * WtAge[,1]
# SSNarray[,1,] <- Ncurr * MatAge[,1] # spawning stock numbers
# SBarray[,1,] <- Narray[,1,] * WtAge[,1] * MatAge[,1] # spawning biomass
# VBarray[,1,] <- Narray[,1,] * WtAge[,1] * Vuln[,1] # vulnerable biomass
# Marray[,1,] <- M_age[,1] # M-at-age
#
# SAYR <- as.matrix(expand.grid(1:maxage, 1, 1:nareas)) # Set up some array indexes age (A) year (Y) region/area (R)
#
# # Distribution of fishing effort
# VBa <- colSums(VBarray[,1,]) # total vuln biomass in each area
#
# # fishdist <- VBa^Spat_targc/mean(VBa^Spat_targc)
# fishdist <- VBa^Spat_targc/sum(VBa^Spat_targc)
#
# Asize_mat <- matrix(Asize_c, nrow=maxage, ncol=nareas, byrow=TRUE)
#
# if (control == 1) {
# FMarray[SAYR] <- (Effind[SAYR[,2]] * Qc * Vuln[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# FMretarray[SAYR] <- (Effind[SAYR[,2]] * Qc * Retc[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# }
# if (control == 2) {
# FMarray[SAYR] <- (Fapic * Vuln[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# FMretarray[SAYR] <- (Fapic * Retc[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# }
#
# FMarray[,1,][FMarray[,1,] > (1 - exp(-maxF))] <- 1 - exp(-maxF)
# FMretarray[,1,][FMretarray[,1,] > (1 - exp(-maxF))] <- 1 - exp(-maxF)
#
# Zarray[,1,] <- Marray[,1,] + FMarray[,1,]
#
# for (y in 1:(pyears-1)) {
#
# NextYrN <- popdynOneTS(nareas, maxage, SSBcurr=colSums(SBarray[,y,]), Ncurr=Narray[,y,],
# Zcurr=Zarray[,y,], PerrYr=Prec[y+maxage+1], hc, R0c, SSBpRc, aRc, bRc,
# movc, SRrelc)
#
# Narray[,y+1,] <- NextYrN
# Barray[,y+1,] <- Narray[,y+1,] * WtAge[,y+1]
# SSNarray[,y+1,] <- Narray[,y+1,] * MatAge[,y+1] # spawning stock numbers
# SBarray[,y+1,] <- Narray[,y+1,] * WtAge[,y+1] * MatAge[,y+1] # spawning biomass
# VBarray[,y+1,] <- Narray[,y+1,] * WtAge[,y+1] * Vuln[,y+1] # vulnerable biomass
# Marray[, y+1, ] <- M_age[,y+1]
#
# # Distribution of fishing effort
# VBa <- colSums(VBarray[,y+1,]) # total vuln biomass in each area
# # fishdist <- VBa^Spat_targc/mean(VBa^Spat_targc)
# fishdist <- VBa^Spat_targc/sum(VBa^Spat_targc)
#
# d1 <- t(matrix(MPA[y,])) * fishdist # distribution of fishing effort
# fracE <- apply(d1, 1, sum) # fraction of current effort in open areas
# fracE2 <- d1 * (fracE + (1-fracE))/fracE # re-distribution of fishing effort
# fishdist <- fracE2 # fishing effort by area
#
#
# SAYR <- as.matrix(expand.grid(1:maxage, y+1, 1:nareas)) # Set up some array indexes age (A) year (Y) region/area (R)
# if (control ==1) {
# FMarray[SAYR] <- (Effind[SAYR[,2]] * Qc * Vuln[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# FMretarray[SAYR] <- (Effind[SAYR[,2]] * Qc * Retc[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# }
# if (control ==2) {
# FMarray[SAYR] <- (Fapic * Vuln[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# FMretarray[SAYR] <- (Fapic * Retc[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# }
# FMarray[SAYR][FMarray[SAYR] > (1 - exp(-maxF))] <- 1 - exp(-maxF)
# FMretarray[SAYR][FMretarray[SAYR] > (1 - exp(-maxF))] <- 1 - exp(-maxF)
# Zarray[,y+1,] <- Marray[,y+1,] + FMarray[,y+1,]
#
# }
#
# out <- list()
# out$Narray <- Narray
# out$Barray <- Barray
# out$SSNarray <- SSNarray
# out$SBarray <- SBarray
# out$VBarray <- VBarray
# out$FMarray <- FMarray
# out$FMretarray <- FMretarray
# out$Zarray <- Zarray
#
# out
# }
#
# #' Population dynamics model for one annual time-step
# #'
# #' Project population forward one time-step given current numbers-at-age and total mortality
# #'
# #' @param nareas The number of spatial areas
# #' @param maxage The maximum age
# #' @param SSBcurr A numeric vector of length nareas with the current spawning biomass in each area
# #' @param Ncurr A numeric matrix (maxage, nareas) with current numbers-at-age in each area
# #' @param Zcurr A numeric matrix (maxage, nareas) with total mortality-at-age in each area
# #' @param PerrYr A numeric value with recruitment deviation for current year
# #' @param hs Steepness of SRR
# #' @param R0c Numeric vector with unfished recruitment by area
# #' @param SSBpRc Numeric vector with unfished spawning stock per recruit by area
# #' @param aRc Numeric vector with Ricker SRR a parameter by area
# #' @param bRc Numeric vector with Ricker SRR b parameter by area
# #' @param movc Numeric matrix (nareas by nareas) with the movement matrix
# #' @param SRrelc Integer indicating the stock-recruitment relationship to use (1 for Beverton-Holt, 2 for Ricker)
# #' @author A. Hordyk
# #'
# # #' @export
# #' @keywords internal
# popdynOneTS <- function(nareas, maxage, SSBcurr, Ncurr, Zcurr,
# PerrYr, hc, R0c, SSBpRc, aRc, bRc, movc, SRrelc) {
#
# # set up some indices for indexed calculation
#
# indMov <- as.matrix(expand.grid(1:maxage,1:nareas, 1:nareas)) # Movement master index
# indMov2 <- indMov[, c(1, 2)] # Movement from index
# indMov3 <- indMov[, c(2, 3)] # Movement to index
#
# Nnext <- array(NA, dim=c(maxage, nareas))
#
# # Recruitment assuming regional R0 and stock wide steepness
# if (SRrelc[1] == 1) {
# Nnext[1, ] <- PerrYr * (4 * R0c * hc * SSBcurr)/(SSBpRc * R0c * (1-hc) + (5*hc-1)*SSBcurr)
# } else {
# # most transparent form of the Ricker uses alpha and beta params
# Nnext[1, ] <- PerrYr * aRc * SSBcurr * exp(-bRc * SSBcurr)
# }
#
# # Mortality
# Nnext[2:maxage, ] <- Ncurr[1:(maxage - 1), ] * exp(-Zcurr[1:(maxage - 1), ]) # Total mortality
#
# # Movement of stock
# temp <- array(Nnext[indMov2] * movc[indMov3], dim = c(maxage,nareas, nareas)) # Move individuals
# Nnext <- apply(temp, c(1, 3), sum)
#
# # Numbers-at-age at beginning of next year
# return(Nnext)
#
# }
#
#
# #' Simulate population dynamics for historical years
# #'
# #' @param x Integer, the simulation number
# #' @param nareas The number of spatial areas
# #' @param maxage The maximum age
# #' @param N Array of the numbers-at-age in population. Dimensions are nsim, maxage, nyears, nareas.
# #' Only values from the first year (i.e `N[,,1,]`) are used, which is the current N-at-age.
# #' @param pyears The number of years to project forward. Equal to 'nyears' for optimizing for q.
# #' @param M_ageArray An array (dimensions nsim, maxage, nyears+proyears) with the natural mortality-at-age and year
# #' @param Asize A matrix (dimensions nsim, nareas) of size of areas
# #' @param Mat_age A matrix (dimensions nsim, maxage) with the proportion mature for each age-class
# #' @param Wt_age An array (dimensions nsim, maxage, nyears+proyears) with the weight-at-age and year
# #' @param V An array (dimensions nsim, maxage, nyears+proyears) with the vulnerability-at-age and year
# #' @param retA An array (dimensions nsim, maxage, nyears+proyears) with the probability retained-at-age and year
# #' @param Perr A matrix (dimensions nsim, nyears+proyears) with the recruitment deviations
# #' @param mov An array (dimensions nsim, nareas, nareas) with the movement matrix
# #' @param SRrel A numeric vector nsim long specifying the recruitment curve to use
# #' @param Find A matrix (dimensions nsim, nyears) with the historical fishing effort
# #' @param Spat_targ A numeric vector nsim long with the spatial targeting
# #' @param hs A numeric vector nsim long with the steepness values for each simulation
# #' @param R0a A matrix (dimensions nsim, nareas) with the unfished recruitment by area
# #' @param SSBpR A matrix (dimensions nsim, nareas) with the unfished spawning-per-recruit by area
# #' @param aR A numeric vector nsim long with the Ricker SRR a values
# #' @param bR A numeric vector nsim long with the Ricker SRR b values
# #' @param qs A numeric vector nsim long with catchability coefficients
# #' @param MPA A matrix of spatial closures by year
# #' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
# #' @param useCPP logical - use the CPP code? For testing purposes only
# #' @param SSB0 SSB0
# #' @author A. Hordyk
# #' @keywords internal
# #' @export
# simYears <- function(x, nareas, maxage, N, pyears, M_ageArray, Asize, Mat_age, Wt_age,
# V, retA, Perr, mov, SRrel, Find, Spat_targ, hs, R0a, SSBpR, aR, bR, qs,
# MPA, maxF, useCPP=TRUE, SSB0) {
# if(!useCPP) {
# # popdyn(nareas, maxage, Ncurr=N[x,,1,], pyears,
# # M_age=M_ageArray[x,,], Asize_c=Asize[x,], MatAge=Mat_age[x,,], WtAge=Wt_age[x,,],
# # Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,], movc=mov[x,,,], SRrelc=SRrel[x],
# # Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# # SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Qc=qs[x], MPA=MPA, maxF=maxF, control=1)
# # doesn't currently work with age-based movement
# } else {
# popdynCPP(nareas, maxage, Ncurr=N[x,,1,], pyears,
# M_age=M_ageArray[x,,], Asize_c=Asize[x,], MatAge=Mat_age[x,,], WtAge=Wt_age[x,,],
# Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,], movc=mov[x,,,], SRrelc=SRrel[x],
# Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Qc=qs[x], Fapic=0, MPA=MPA, maxF=maxF,
# control=1, SSB0c=SSB0[x])
# }
#
# }
# #' Calculate FMSY and related metrics using Rcpp code
# #'
# #' @param x Integer, the simulation number
# #' @param Asize A matrix (nsim by nareas) with size of areas
# #' @param nareas The number of spatial areas
# #' @param maxage The maximum age
# #' @param N Array of the numbers-at-age in population. Dimensions are nsim, maxage, nyears, nareas.
# #' Only values from the first year (i.e `N[,,1,]`) are used, which is the current N-at-age.
# #' @param pyears The number of years to project forward. Equal to 'nyears' for optimizing for q.
# #' @param M_ageArray An array (dimensions nsim, maxage, nyears+proyears) with the natural mortality-at-age and year
# #' @param Mat_age A matrix (dimensions nsim, maxage) with the proportion mature for each age-class
# #' @param Wt_age An array (dimensions nsim, maxage, nyears+proyears) with the weight-at-age and year
# #' @param V An array (dimensions nsim, maxage, nyears+proyears) with the vulnerability-at-age and year
# #' @param retA An array (dimensions nsim, maxage, nyears+proyears) with the probability retained-at-age and year
# #' @param Perr A matrix (dimensions nsim, nyears+proyears) with the recruitment deviations
# #' @param mov An array (dimensions nsim, nareas, nareas) with the movement matrix
# #' @param SRrel A numeric vector nsim long specifying the recruitment curve to use
# #' @param Find A matrix (dimensions nsim, nyears) with the historical fishing effort
# #' @param Spat_targ A numeric vector nsim long with the spatial targeting
# #' @param hs A numeric vector nsim long with the steepness values for each simulation
# #' @param R0a A matrix (dimensions nsim, nareas) with the unfished recruitment by area
# #' @param SSBpR A matrix (dimensions nsim, nareas) with the unfished spawning-per-recruit by area
# #' @param aR A numeric vector nsim long with the Ricker SRR a values
# #' @param bR A numeric vector nsim long with the Ricker SRR b values
# #' @param SSB0 Unfished spawning biomass
# #' @param B0 Unfished total biomass
# #' @param MPA A matrix of spatial closures by year
# #' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
# #' @param useCPP logical - use the CPP code? For testing purposes only
# #'
# #' @author A. Hordyk
# #'
# getFMSY3 <- function(x, Asize, nareas, maxage, N, pyears, M_ageArray, Mat_age, Wt_age,
# V, retA, Perr, mov, SRrel, Find, Spat_targ, hs, R0a, SSBpR, aR, bR,
# SSB0, B0, MPA, maxF, useCPP=TRUE) {
#
# opt <- optimize(optMSY, log(c(0.001, 10)), Asize_c=Asize[x,], nareas, maxage, Ncurr=N[x,,1,],
# pyears, M_age=M_ageArray[x,,], MatAge=Mat_age[x,,],
# WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
# movc=mov[x,,,], SRrelc=SRrel[x],
# Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], MPA=MPA, maxF=maxF, useCPP=useCPP,
# SSB0c=SSB0[x])
#
# MSY <- -opt$objective
#
# if (!useCPP) {
# # simpop <- popdyn(nareas, maxage, Ncurr=N[x,,1,],
# # pyears, M_age=M_ageArray[x,,], Asize_c=Asize[x,],
# # MatAge=Mat_age[x,,],
# # WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
# # movc=mov[x,,,], SRrelc=SRrel[x],
# # Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# # SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Fapic=exp(opt$minimum), MPA=MPA, maxF=maxF, control=2)
# #
# # # calculate B0 and SSB0 with current conditions
# # simpopF0 <- popdyn(nareas, maxage, Ncurr=N[x,,1,],
# # pyears, M_age=M_ageArray[x,,], Asize_c=Asize[x,],
# # MatAge=Mat_age[x,,],
# # WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
# # movc=mov[x,,,], SRrelc=SRrel[x],
# # Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# # SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Fapic=0, MPA=MPA, maxF=maxF, control=2)
#
# } else {
# simpop <- popdynCPP(nareas, maxage, Ncurr=N[x,,1,],
# pyears, M_age=M_ageArray[x,,], Asize_c=Asize[x,],
# MatAge=Mat_age[x,,],
# WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
# movc=mov[x,,,], SRrelc=SRrel[x],
# Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Qc=0, Fapic=exp(opt$minimum),
# MPA=MPA, maxF=maxF, control=2, SSB0c = SSB0[x])
# # calculate B0 and SSB0 with current conditions
# simpopF0 <- popdynCPP(nareas, maxage, Ncurr=N[x,,1,],
# pyears, M_age=M_ageArray[x,,], Asize_c=Asize[x,],
# MatAge=Mat_age[x,,],
# WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
# movc=mov[x,,,], SRrelc=SRrel[x],
# Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Qc=0, Fapic=0, MPA=MPA, maxF=maxF,
# control=2, SSB0c = SSB0[x])
# }
#
#
# ## Cn <- simpop[[7]]/simpop[[8]] * simpop[[1]] * (1-exp(-simpop[[8]])) # retained catch
# Cn <- simpop[[6]]/simpop[[8]] * simpop[[1]] * (1-exp(-simpop[[8]])) # removals
# Cb <- Cn[,pyears,] * Wt_age[x,,pyears]
#
# B <- sum(simpop[[2]][,pyears,] + Cb)
#
# SSB_MSY <- sum(simpop[[4]][,pyears,])
#
# V_BMSY <- sum(simpop[[5]][,pyears,])
# F_MSYv <- -log(1 - (MSY/(V_BMSY+MSY)))
#
#
# SSB0_curr <- sum(simpopF0[[4]][,pyears,])
# B0_curr <- sum(simpopF0[[2]][,pyears,])
# SSBMSY_SSB0 <- sum(simpop[[4]][,pyears,])/SSB0_curr
# BMSY_B0 <- sum(simpop[[2]][,pyears,])/B0_curr
# # SSBMSY_SSB0 <- sum(simpop[[4]][,pyears,])/SSB0[x]
# # BMSY_B0 <- sum(simpop[[2]][,pyears,])/B0[x]
#
#
# return(c(MSY = MSY, FMSY = F_MSYv, SSB = SSB_MSY, SSBMSY_SSB0=SSBMSY_SSB0,
# BMSY_B0=BMSY_B0, B = B, VB=V_BMSY+MSY))
#
# }
#
#
#
#
#' Optimize yield for a single simulation
#'
#' @param logFa log apical fishing mortality
#' @param Asize_c A vector of length areas with relative size of areas
#' @param nareas Number of areas
#' @param maxage Maximum age
#' @param Ncurr Current N-at-age
#' @param pyears Number of projection years
#' @param M_age M-at-age
#' @param MatAge Maturity-at-age
#' @param WtAge Weight-at-age
#' @param Vuln Vulnerability-at-age
#' @param Retc Retention-at-age
#' @param Prec Recruitment error
#' @param movc Movement matrix
#' @param SRrelc SR Relationship
#' @param Effind Historical effort
#' @param Spat_targc Spatial targeting
#' @param hc Steepness
#' @param R0c Unfished recruitment by area
#' @param SSBpRc Unfished spawning stock per recruit by area
#' @param aRc Ricker aR
#' @param bRc Ricker bR
#' @param Qc Catchability
#' @param MPA A matrix of spatial closures by year
#' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
#' @param SSB0c SSB0
#' @param plusgroup Integer. Default = 0 = no plus-group. Use 1 to include a plus-group
#' @keywords internal
#'
#' @author A. Hordyk
#'
optMSY <- function(logFa, Asize_c, nareas, maxage, Ncurr, pyears, M_age,
                   MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind, Spat_targc, hc,
                   R0c, SSBpRc, aRc, bRc, Qc, MPA, maxF, SSB0c,
                   plusgroup=0) {
  apicalF <- exp(logFa)
  # Project forward at a fixed apical F (control=2 uses Fapic and
  # vulnerability rather than q * effort)
  pop <- popdynCPP(nareas, maxage, Ncurr, pyears, M_age, Asize_c,
                   MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind, Spat_targc, hc,
                   R0c, SSBpRc, aRc, bRc, Qc=0, Fapic=apicalF, MPA=MPA, maxF=maxF, control=2,
                   SSB0c=SSB0c, plusgroup = plusgroup)
  # Removals-at-age via the Baranov equation: N * F/Z * (1 - exp(-Z))
  # (pop[[1]] = N, pop[[6]] = F for removals, pop[[8]] = Z)
  removalsN <- pop[[6]]/pop[[8]] * pop[[1]] * (1 - exp(-pop[[8]]))
  # Yield: mean annual removals (in weight) over the last five projection
  # years; negated because this is a minimisation objective
  yrs <- (pyears - 4):pyears
  removalsB <- removalsN[, yrs, ] * array(WtAge[, yrs], dim = dim(removalsN[, yrs, ]))
  -mean(apply(removalsB, 2, sum))
}
#' Calculate Reference Yield
#'
#' @param x Integer, the simulation number
#' @param Asize A matrix (dimensions nsim by nareas) with relative size of areas
#' @param nareas The number of spatial areas
#' @param maxage The maximum age
#' @param N Array of the numbers-at-age in population. Dimensions are nsim, maxage, nyears, nareas.
#' Only values from the first year are used, which is the current N-at-age.
#' @param pyears The number of years to project forward. Equal to 'nyears' for optimizing for q.
#' @param M_ageArray An array (dimensions nsim, maxage, nyears+proyears) with the natural mortality-at-age and year
#' @param Mat_age An array (dimensions nsim, maxage, nyears+proyears) with the proportion mature for each age-class
#' @param Wt_age An array (dimensions nsim, maxage, nyears+proyears) with the weight-at-age and year
#' @param V An array (dimensions nsim, maxage, nyears+proyears) with the vulnerability-at-age and year
#' @param retA An array (dimensions nsim, maxage, nyears+proyears) with the probability retained-at-age and year
#' @param Perr A matrix (dimensions nsim, nyears+proyears) with the recruitment deviations
#' @param mov An array (dimensions nsim, nareas, nareas) with the movement matrix
#' @param SRrel A numeric vector nsim long specifying the recruitment curve to use
#' @param Find A matrix (dimensions nsim, nyears) with the historical fishing effort
#' @param Spat_targ A numeric vector nsim long with the spatial targeting
#' @param hs A numeric vector nsim long with the steepness values for each simulation
#' @param R0a A matrix (dimensions nsim, nareas) with the unfished recruitment by area
#' @param SSBpR A matrix (dimensions nsim, nareas) with the unfished spawning-per-recruit by area
#' @param aR A numeric vector nareas long with the Ricker SRR a values
#' @param bR A numeric vector nareas long with the Ricker SRR b values
#' @param MPA A matrix of spatial closures by year
#' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
#' @param useCPP logical - use the CPP code? For testing purposes only
#' @param SSB0 SSB0
#' @param plusgroup Integer. Default = 0 = no plus-group. Use 1 to include a plus-group
#' @author A. Hordyk
#' @export
#' @keywords internal
getFref3 <- function(x, Asize, nareas, maxage, N, pyears, M_ageArray, Mat_age, Wt_age,
                     V, retA, Perr, mov, SRrel, Find, Spat_targ, hs, R0a, SSBpR, aR, bR,
                     MPA, maxF, SSB0, plusgroup=0) {
  # Search over log apical F for the F that maximises mean projected yield
  # (optMSY returns the negative of mean yield over the last 5 projection
  # years).
  # NOTE: the unnamed arguments are matched positionally into optMSY
  # through optimize's `...` - do not reorder them.
  opt <- optimize(optMSY, log(c(0.001, 10)), Asize_c=Asize[x,], nareas, maxage, Ncurr=N[x,,1,],
                  pyears, M_age=M_ageArray[x,,], MatAge=Mat_age[x,,],
                  WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
                  movc=split.along.dim(mov[x,,,,],4), SRrelc=SRrel[x],
                  Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
                  SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], MPA=MPA, maxF=maxF,
                  SSB0c=SSB0[x], plusgroup=plusgroup)
  # Reference yield: negate the minimised objective
  -opt$objective
}
# Input Control Functions Wrapper function for input control methods
#' Runs input control MPs on a Data object.
#'
#' Function runs a MP (or MPs) of class 'Input' and returns a list: input
#' control recommendation(s) in element 1 and Data object in element 2.
#'
#'
#' @usage runInMP(Data, MPs = NA, reps = 100)
#' @param Data A object of class Data
#' @param MPs A vector of MPs of class 'Input'
#' @param reps Number of stochastic repetitions - often not used in input
#' control MPs.
#' @author A. Hordyk
#' @export
runInMP <- function(Data, MPs = NA, reps = 100) {
nsims <- length(Data@Mort)
if (.hasSlot(Data, "nareas")) {
nareas <- Data@nareas
} else {
nareas <- 2
}
nMPs <- length(MPs)
returnList <- list() # a list nMPs long containing MPs recommendations
recList <- list() # a list containing nsim recommendations from a single MP
if (!sfIsRunning() | (nMPs < 8 & nsims < 8)) {
for (ff in 1:nMPs) {
temp <- sapply(1:nsims, MPs[ff], Data = Data, reps = reps)
slots <- slotNames(temp[[1]])
for (X in slots) { # sequence along recommendation slots
if (X == "Misc") { # convert to a list nsim by nareas
rec <- lapply(temp, slot, name=X)
} else {
rec <- unlist(lapply(temp, slot, name=X))
}
if (X == "Spatial") { # convert to a matrix nsim by nareas
rec <- matrix(rec, nsims, nareas, byrow=TRUE)
}
recList[[X]] <- rec
for (x in 1:nsims) Data@Misc[[x]] <- recList$Misc[[x]]
recList$Misc <- NULL
}
returnList[[ff]] <- recList
}
} else {
sfExport(list = c("Data"))
for (ff in 1:nMPs) {
temp <- sfSapply(1:nsims, MPs[ff], Data = Data, reps = reps)
slots <- slotNames(temp[[1]])
for (X in slots) { # sequence along recommendation slots
if (X == "Misc") { # convert to a list nsim by nareas
rec <- lapply(temp, slot, name=X)
} else {
rec <- unlist(lapply(temp, slot, name=X))
}
if (X == "Spatial") { # convert to a matrix nsim by nareas
rec <- matrix(rec, nsims, nareas, byrow=TRUE)
}
recList[[X]] <- rec
for (x in 1:nsims) Data@Misc[[x]] <- recList$Misc[[x]]
recList$Misc <- NULL
}
returnList[[ff]] <- recList
}
}
return(list(returnList, Data))
}
# Project the population for the x-th simulation forward 'pyears' years and
# return the numbers-at-age by area in year 'Nyrs'. All dynamics are
# delegated to the compiled popdynCPP routine (Qc=0, Fapic=0, control=3 --
# presumably an effort-driven equilibrium projection; confirm against the
# popdynCPP source). 'B0' and 'R0' are accepted for interface compatibility
# but are not used in this body.
projectEq <- function(x, Asize, nareas, maxage, N, pyears, M_ageArray, Mat_age, Wt_age,
                      V, retA, Perr, mov, SRrel, Find, Spat_targ, hs, R0a, SSBpR, aR, bR,
                      SSB0, B0, MPA, maxF, Nyrs, R0) {
  # popdynCPP returns a list; element 1 is numbers-at-age by year and area
  simpop <- popdynCPP(nareas, maxage, Ncurr=N[x,,1,],
                      pyears, M_age=M_ageArray[x,,], Asize_c=Asize[x,],
                      MatAge=Mat_age[x,,],
                      WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
                      movc=split.along.dim(mov[x,,,,],4), SRrelc=SRrel[x],
                      Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
                      SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Qc=0, Fapic=0, MPA=MPA,
                      maxF=maxF, control=3, SSB0c=SSB0[x])
  # Extract the age x area slice for the requested year
  simpop[[1]][,Nyrs,]
}
optDfun <- function(Perrmulti, x, initD, Nfrac, R0, Perr_y, surv,
                    Wt_age, SSB0, maxage) {
  # Objective for the initial-depletion search: squared difference between
  # realised depletion (SSB/SSB0) and the target initD for simulation x.
  # 'surv' is accepted for interface compatibility but is not used here.
  recDevs <- exp(Perrmulti) * rev(Perr_y[x, 1:maxage])  # scaled recruitment deviations
  ssNum <- R0[x] * Nfrac[x, ] * recDevs                 # initial spawning numbers-at-age
  ssBio <- ssNum * Wt_age[x, , 1]                       # initial spawning biomass-at-age
  (sum(ssBio) / SSB0[x] - initD[x])^2
}
optDfunwrap <- function(x, initD, Nfrac, R0, initdist, Perr_y, surv,
                        Wt_age, SSB0, maxage) {
  # Solve for the recruitment-deviation multiplier that yields the target
  # initial depletion (initD) for simulation x, and return it on the
  # natural scale. 'initdist' is accepted for interface compatibility but
  # is not used in this body.
  searchRange <- log(c(0.01, 10))
  fit <- optimise(optDfun, interval = searchRange, x = x, initD = initD,
                  Nfrac = Nfrac, R0 = R0, Perr_y = Perr_y, surv = surv,
                  Wt_age = Wt_age, SSB0 = SSB0, maxage = maxage)
  exp(fit$minimum)
}
# calcMSYRicker <- function(MSYyr, M_ageArray, Wt_age, retA, V, Perr_y, maxage,
# nareas, Mat_age, nsim, Asize, N, Spat_targ, hs,
# SRrel, mov, Find, R0a, SSBpR, aR, bR, SSB0,
# B0, maxF=maxF, cur.yr) {
# # Note: MSY and refY are calculated from total removals not total catch (different when Fdisc>0 and there is discarding)
# # Make arrays for future conditions assuming current conditions
# M_ageArrayp <- array(M_ageArray[,,cur.yr], dim=c(dim(M_ageArray)[1:2], MSYyr))
# Wt_agep <- array(Wt_age[,,cur.yr], dim=c(dim(Wt_age)[1:2], MSYyr))
# retAp <- array(retA[,,cur.yr], dim=c(dim(retA)[1:2], MSYyr))
# Vp <- array(V[,,cur.yr], dim=c(dim(V)[1:2], MSYyr))
# Perrp <- array(1, dim=c(dim(Perr_y)[1], MSYyr+maxage))
# noMPA <- matrix(1, nrow=MSYyr, ncol=nareas)
# Mat_agep <-abind::abind(rep(list(Mat_age[,,cur.yr]), MSYyr), along=3)
# # optimize for MSY reference points
# if (snowfall::sfIsRunning()) {
# MSYrefs <- snowfall::sfSapply(1:nsim, getFMSY3, Asize, nareas=nareas,
# maxage=maxage, N=N, pyears=MSYyr,
# M_ageArray=M_ageArrayp, Mat_age=Mat_agep,
# Wt_age=Wt_agep, V=Vp, retA=retAp,
# Perr=Perrp, mov=mov, SRrel=SRrel,
# Find=Find, Spat_targ=Spat_targ, hs=hs,
# R0a=R0a, SSBpR=SSBpR, aR=aR, bR=bR, SSB0=SSB0,
# B0=B0, MPA=noMPA, maxF=maxF)
# } else {
# MSYrefs <- sapply(1:nsim, getFMSY3, Asize, nareas=nareas, maxage=maxage,
# N=N, pyears=MSYyr, M_ageArray=M_ageArrayp, Mat_age=Mat_agep,
# Wt_age=Wt_agep, V=Vp, retA=retAp,Perr=Perrp, mov=mov,
# SRrel=SRrel, Find=Find, Spat_targ=Spat_targ, hs=hs,
# R0a=R0a, SSBpR=SSBpR, aR=aR, bR=bR, SSB0=SSB0, B0=B0,
# MPA=noMPA, maxF=maxF)
# }
# MSYrefs
# }
# #' Apply output control recommendations and calculate population dynamics
# #'
# #' @param y Projection year
# #' @param Asize relative size of areas (matrix nsim by nareas)
# #' @param TACused TAC recommendation
# #' @param TAC_f Implementation error on TAC
# #' @param lastCatch Catch from last year
# #' @param availB Total available biomass
# #' @param maxF Maximum fishing mortality
# #' @param Biomass_P Numeric array (nsim, maxage, proyears, nareas) with Biomass at age
# #' @param VBiomass_P Numeric array (nsim, maxage, proyears, nareas) with Vulnerable Biomass at age
# #' @param CB_P Numeric array (nsim, maxage, proyears, nareas) with Catch Biomass at age
# #' @param CB_Pret Numeric array (nsim, maxage, proyears, nareas) with Retained catch biomass at age
# #' @param FM_P Numeric array (nsim, maxage, proyears, nareas) with fishing mortality at age
# #' @param Z_P Numeric array (nsim, maxage, proyears, nareas) with total mortality at age
# #' @param Spat_targ Spatial targetting
# #' @param V_P Numeric array(nsim, maxage, nyears+proyears) with vulnerability at age
# #' @param retA_P Numeric array(nsim, maxage, nyears+proyears) with retention at age
# #' @param M_ageArray Numeric array (nsim, maxage, nyears+proyears) Natural mortality at age
# #' @param qs Catchability coefficient
# #' @param nyears Number of historical years
# #' @param nsim Number of simulations
# #' @param maxage Maximum age
# #' @param nareas Number of areas
# #'
# # #' @export
# #'
# #' @author A. Hordyk
# #'
# CalcOutput <- function(y, Asize, TACused, TAC_f, lastCatch, availB, maxF, Biomass_P, VBiomass_P, CB_P, CB_Pret,
# FM_P, Z_P, Spat_targ, V_P, retA_P, M_ageArray, qs, nyears, nsim, maxage, nareas) {
# SAYRL <- as.matrix(expand.grid(1:nsim, 1:maxage, nyears, 1:nareas)) # Final historical year
# SAYRt <- as.matrix(expand.grid(1:nsim, 1:maxage, y + nyears, 1:nareas)) # Trajectory year
# SAYR <- as.matrix(expand.grid(1:nsim, 1:maxage, y, 1:nareas))
# SYt <- SAYRt[, c(1, 3)]
# SAYt <- SAYRt[, 1:3]
# SR <- SAYR[, c(1, 4)]
# SA1 <- SAYR[, 1:2]
# S1 <- SAYR[, 1]
# SY1 <- SAYR[, c(1, 3)]
# SAY1 <- SAYR[, 1:3]
# SYA <- as.matrix(expand.grid(1:nsim, 1, 1:maxage)) # Projection year
# SY <- SYA[, 1:2]
# SA <- SYA[, c(1, 3)]
# SAY <- SYA[, c(1, 3, 2)]
# S <- SYA[, 1]
#
# TACused[is.na(TACused)] <- lastCatch[is.na(TACused)] # if MP returns NA - TAC is set to catch from last year
#
# TACrec <- TACused # TAC recommendation
# TACusedE<- TAC_f[,y]*TACused # TAC taken after implementation error
#
# maxC <- (1 - exp(-maxF)) * availB # maximum catch given maxF
# TACusedE[TACusedE > maxC] <- maxC[TACusedE > maxC] # apply maxF limit - catch can't be higher than maxF * vulnerable biomass
#
# # fishdist <- (apply(VBiomass_P[, , y, ], c(1, 3), sum)^Spat_targ)/
# # apply(apply(VBiomass_P[, , y, ], c(1, 3), sum)^Spat_targ, 1, mean) # spatial preference according to spatial biomass
#
# fishdist <- (apply(VBiomass_P[, , y, ], c(1, 3), sum)^Spat_targ)/
# apply(apply(VBiomass_P[, , y, ], c(1, 3), sum)^Spat_targ, 1, sum) # spatial preference according to spatial biomass
#
#
#
# # If there is discard mortality, actual removals are higher than TACused
# # calculate distribution of all effort
# CB_P[SAYR] <- (Biomass_P[SAYR] * V_P[SAYt] * fishdist[SR])/Asize[SR] # ignore magnitude of effort or q increase (just get distribution across age and fishdist across space
# # calculate distribution of retained effort
# CB_Pret[SAYR] <- (Biomass_P[SAYR] * retA_P[SAYt] * fishdist[SR])/Asize[SR] # ignore magnitude of effort or q increase (just get distribution across age and fishdist across space
#
# retained <- apply(CB_Pret[,,y,], 1, sum)
# actualremovals <- apply(CB_P[,,y,], 1, sum)
#
# ratio <- actualremovals/retained # ratio of actual removals to retained catch
#
# temp <- CB_Pret[, , y, ]/apply(CB_Pret[, , y, ], 1, sum) # distribution of retained fish
# CB_Pret[, , y, ] <- TACusedE * temp # retained catch
#
# temp <- CB_P[, , y, ]/apply(CB_P[, , y, ], 1, sum) # distribution of removals
# CB_P[,,y,] <- TACusedE * ratio * temp # scale up total removals
#
# temp <- CB_P[SAYR]/(Biomass_P[SAYR] * exp(-M_ageArray[SAYt]/2)) # Pope's approximation
# temp[temp > (1 - exp(-maxF))] <- 1 - exp(-maxF)
#
# FM_P[SAYR] <- -log(1 - temp)
#
# # calcFs <- lapply(1:nsim, getFs, y=y, Vuln=V_P, CB=CB_P, Bio=Biomass_P, Mage=M_ageArray, Fdist=fishdist,
# # maxage=maxage, nareas=nareas, nyears=nyears) # numerically calculate Fs
# #
# #
# # FM_P[,,y,] <- aperm(array(unlist(calcFs, use.names=FALSE), dim=c(maxage, nareas, nsim)), c(3, 1, 2))
# # FM_P[,,y,][FM_P[,,y,] > (1-exp(-maxF))] <- 1 - exp(-maxF)
#
# Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt]
#
# Effort <- (-log(1 - apply(CB_P[, , y, ], 1, sum)/(apply(CB_P[, , y, ], 1, sum) +
# apply(VBiomass_P[, , y, ], 1, sum))))/qs
# out <- list()
# out$Z_P <- Z_P
# out$FM_P <- FM_P
# out$CB_P <- CB_P
# out$CB_Pret <- CB_Pret
# out$TACused <- TACused
# out$TACrec <- TACrec
# out$Effort <- Effort
# out
# }
# #' Internal function to calculate F-at-age given catch and biomass
# #'
# #' @param x Simulation
# #' @param y year
# #' @param Vuln Vulnerability
# #' @param CB Catch biomass
# #' @param Bio Biomass
# #' @param Mage M-at-age
# #' @param Fdist Fishing distribution
# #' @param maxage Maximum age
# #' @param nareas Number of areas
# #' @param nyears Number of historical years
# #' @keywords internal
# #'
# #' @export
# #'
# #' @author A. Hordyk
# getFs <- function(x, y, Vuln, CB, Bio, Mage, Fdist, maxage, nareas, nyears) {
#
# doopt <- optimize(optF, interval=log(c(0.01, 10)), Vuln[x,,nyears+y], CB[x,,y,],
# Bio[x,,y,], Mage[x,,y+nyears], Fdist[x,], maxage,nareas)
#
# ind <- as.matrix(expand.grid(x, 1:maxage, 1:nareas))
# ind2 <- as.matrix(expand.grid(1, 1:maxage, 1:nareas))
# FM <- array(NA, dim=c(1, maxage, nareas))
# FM[ind2] <- exp(doopt$minimum) * Vuln[ind] * Fdist[ind[,c(1,3)]]
# FM
# }
#
# #' Internal function to optimize for F
# #'
# #' @param fapic Apical fishing mortality
# #' @param vuln Vulnerability
# #' @param catch Catch
# #' @param bio Biomass
# #' @param mort Natural mortality
# #' @param fdist Fishing distribution
# #' @param maxage Maximum age
# #' @param nareas Number of areas
# #'
# #' @export
# #'
# #' @author A. Hordyk
# optF <- function(fapic, vuln, catch, bio, mort, fdist, maxage, nareas) {
# FM <- array(NA, dim=c(maxage, nareas))
# ind <- as.matrix(expand.grid(1:maxage, 1:nareas))
# FM[ind] <- exp(fapic) * vuln[ind[,1]] * fdist[ind[,2]]
#
# # FM[ind] <- (exp(fapic) * vuln[ind[,1]] * fdist[ind[,2]]) / area_size[ind[,2]]
#
# Z <- FM + mort
#
# pCatch <- FM/Z * bio* (1-exp(-Z))
# (log(sum(pCatch)) - log(sum(catch)))^2
#
# }
# #' Apply input control recommendations and calculate population dynamics
# #'
# #' Internal function
# #'
# #' @param y Simulation year
# #' @param Asize Matrix (nsim by nareas) with relative size of areas
# #' @param nyears Number of historical
# #' @param proyears Number of projection years
# #' @param InputRecs Input control recommendations
# #' @param nsim Number of simulations
# #' @param nareas Number of areas
# #' @param LR5_P Length at 5 percent retention
# #' @param LFR_P Length at full retention
# #' @param Rmaxlen_P Retention of maximum length
# #' @param maxage Maximum age
# #' @param retA_P Retention at age
# #' @param retL_P Retention at length
# #' @param V_P Realized vulnerability at age
# #' @param V2 Gear vulnerability at age
# #' @param pSLarray Realized vulnerability at length
# #' @param SLarray2 Gear vulnerability at length
# #' @param DR Discard ratio
# #' @param maxlen maximum length
# #' @param Len_age Length-at-age
# #' @param CAL_binsmid Length-bin mid-points
# #' @param Fdisc Fraction of discarded fish that die
# #' @param nCALbins Number of length bins
# #' @param E_f Implementation error on effort recommendation
# #' @param SizeLim_f Implementation error on size limit
# #' @param VBiomass_P Vulnerable biomass-at-age
# #' @param Biomass_P Biomass-at-age
# #' @param Spat_targ Spatial targetting
# #' @param FinF Final fishing effort
# #' @param qvar Annual variability in catchability
# #' @param qs Catchability
# #' @param qinc Numeric vector (nsim) increased
# #' @param CB_P Numeric array (nsim, maxage, proyears, nareas) Catch biomass at age
# #' @param CB_Pret Numeric array (nsim, maxage, proyears, nareas) Retained catch biomass at age
# #' @param FM_P Numeric array (nsim, maxage, proyears, nareas) Fishing mortality at age
# #' @param FM_retain Numeric array (nsim, maxage, proyears, nareas) Retained fishing mortality at age
# #' @param Z_P Numeric array (nsim, maxage, proyears, nareas) Total mortality at age
# #' @param M_ageArray Numeric array (nsim, maxage, nyears+proyears) Natural mortality at age
# #' @param LastEffort Numeric vector (nsim) with fishing effort from last year
# #' @param LastSpatial Numeric matrix (nsim, nareas) with spatial closures from last year
# #' @param LastAllocat Numeric vector (nsim) with allocation from last year
# #'
# #' @keywords internal
# #' @export
# #'
# #' @author A. Hordyk
# #'
# CalcInput <- function(y, Linf, Asize, nyears, proyears, InputRecs, nsim, nareas, LR5_P, LFR_P,
# Rmaxlen_P, maxage, retA_P, retL_P, V_P, V2, pSLarray,
# SLarray2, DR, maxlen, Len_age, CAL_binsmid, Fdisc,
# nCALbins, E_f, SizeLim_f, VBiomass_P, Biomass_P, Spat_targ,
# FinF, qvar, qs, qinc, CB_P, CB_Pret, FM_P, FM_retain, Z_P,
# M_ageArray, LastEffort, LastSpatial, LastAllocat) {
#
# SAYRL <- as.matrix(expand.grid(1:nsim, 1:maxage, nyears, 1:nareas)) # Final historical year
# SAYRt <- as.matrix(expand.grid(1:nsim, 1:maxage, y + nyears, 1:nareas)) # Trajectory year
# SAYR <- as.matrix(expand.grid(1:nsim, 1:maxage, y, 1:nareas))
# SYt <- SAYRt[, c(1, 3)]
# SAYt <- SAYRt[, 1:3]
# SR <- SAYR[, c(1, 4)]
# SA1 <- SAYR[, 1:2]
# S1 <- SAYR[, 1]
# SY1 <- SAYR[, c(1, 3)]
# SAY1 <- SAYR[, 1:3]
# SYA <- as.matrix(expand.grid(1:nsim, 1, 1:maxage)) # Projection year
# SY <- SYA[, 1:2]
# SA <- SYA[, c(1, 3)]
# SAY <- SYA[, c(1, 3, 2)]
# S <- SYA[, 1]
#
# # Change in Effort
# if (length(InputRecs$Effort) == 0) { # no effort recommendation
# if (y==1) Ei <- LastEffort * E_f[,y] # effort is unchanged but has implementation error
# if (y>1) Ei <- LastEffort / E_f[,y-1] * E_f[,y] # effort is unchanged but has implementation error
# } else if (length(InputRecs$Effort) != nsim) {
# stop("Effort recommmendation is not 'nsim' long.\n Does MP return Effort recommendation under all conditions?")
# } else {
# Ei <- InputRecs$Effort * E_f[,y] # effort adjustment with implementation error
# }
#
# # Spatial
# if (all(is.na(InputRecs$Spatial))) { # no spatial recommendation
# Si <- LastSpatial # matrix(1, nsim, nareas) # spatial is unchanged - modify this if spatial closure in historical years
# } else if (any(is.na(InputRecs$Spatial))) {
# stop("Spatial recommmendation has some NAs.\n Does MP return Spatial recommendation under all conditions?")
# } else {
# Si <-InputRecs$Spatial # change spatial fishing
# }
#
# # Allocation
# if (length(InputRecs$Allocate) == 0) { # no allocation recommendation
# Ai <- LastAllocat # rep(0, nsim) # allocation is unchanged
# } else if (length(InputRecs$Allocate) != nsim) {
# stop("Allocate recommmendation is not 'nsim' long.\n Does MP return Allocate recommendation under all conditions?")
# } else {
# Ai <- InputRecs$Allocate # change in spatial allocation
# }
# # Retention Curve
# RetentFlag <- FALSE
# # LR5
# if (length(InputRecs$LR5) == 0) { # no recommendation
# LR5_P[(y + nyears):(nyears+proyears),] <- matrix(LR5_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
#
# } else if (length(InputRecs$LR5) != nsim) {
# stop("LR5 recommmendation is not 'nsim' long.\n Does MP return LR5 recommendation under all conditions?")
# } else {
# LR5_P[(y + nyears):(nyears+proyears),] <- matrix(InputRecs$LR5 * SizeLim_f[,y],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation with implementation error
# RetentFlag <- TRUE
# }
# # LFR
# if (length(InputRecs$LFR) == 0) { # no recommendation
# LFR_P[(y + nyears):(nyears+proyears),] <- matrix(LFR_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
# } else if (length(InputRecs$LFR) != nsim) {
# stop("LFR recommmendation is not 'nsim' long.\n Does MP return LFR recommendation under all conditions?")
# } else {
# LFR_P[(y + nyears):(nyears+proyears),] <- matrix(InputRecs$LFR * SizeLim_f[,y],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation with implementation error
# RetentFlag <- TRUE
# }
# # Rmaxlen
# if (length(InputRecs$Rmaxlen) == 0) { # no recommendation
# Rmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(Rmaxlen_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
#
# } else if (length(Rmaxlen) != nsim) {
# stop("Rmaxlen recommmendation is not 'nsim' long.\n Does MP return Rmaxlen recommendation under all conditions?")
# } else {
# Rmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(InputRecs$Rmaxlen,
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation
# RetentFlag <- TRUE
# }
# # HS - harvest slot
#
# if (length(InputRecs$HS) == 0) { # no recommendation
# HS <- rep(1E5, nsim) # no harvest slot
# } else if (length(InputRecs$HS) != nsim) {
# stop("HS recommmendation is not 'nsim' long.\n Does MP return HS recommendation under all conditions?")
# } else {
# HS <- InputRecs$HS * SizeLim_f[,y] # recommendation
# RetentFlag <- TRUE
# }
# # Change in retention - update vulnerability and retention curves
# if (RetentFlag) {
# yr <- y+nyears
# allyrs <- (y+nyears):(nyears+proyears) # update vulnerabilty for all future years
#
# srs <- (Linf - LFR_P[yr,]) / ((-log(Rmaxlen_P[yr,],2))^0.5) # selectivity parameters are constant for all years
# sls <- (LFR_P[yr,] - LR5_P[yr,]) / ((-log(0.05,2))^0.5)
#
# CAL_binsmidMat <- matrix(CAL_binsmid, nrow=nsim, ncol=length(CAL_binsmid), byrow=TRUE)
# relLen <- t(sapply(1:nsim, getsel, lens=CAL_binsmidMat, lfs=LFR_P[yr,], sls=sls, srs=srs))
#
# for (yy in allyrs) {
# # calculate new retention at age curve
# retA_P[ , , yy] <- t(sapply(1:nsim, getsel, lens=Len_age[,,yy], lfs=LFR_P[yy,], sls=sls, srs=srs))
#
# # calculate new retention at length curve
# retL_P[,, yy] <- relLen
# }
#
# # upper harvest slot
# aboveHS <- Len_age[,,allyrs]>HS
# tretA_P <- retA_P[,,allyrs]
# tretA_P[aboveHS] <- 0
# retA_P[,,allyrs] <- tretA_P
# for (ss in 1:nsim) {
# index <- which(CAL_binsmid >= HS[ss])
# retL_P[ss, index, allyrs] <- 0
# }
#
# dr <- aperm(abind::abind(rep(list(DR), maxage), along=3), c(2,3,1))
# retA_P[,,allyrs] <- (1-dr[,,yr]) * retA_P[,,yr]
# dr <- aperm(abind::abind(rep(list(DR), nCALbins), along=3), c(2,3,1))
# retL_P[,,allyrs] <- (1-dr[,,yr]) * retL_P[,,yr]
#
# # update realized vulnerablity curve with retention and dead discarded fish
# Fdisc_array1 <- array(Fdisc, dim=c(nsim, maxage, length(allyrs)))
#
# V_P[,,allyrs] <- V2[,,allyrs] * (retA_P[,,allyrs] + (1-retA_P[,,allyrs])*Fdisc_array1)
#
# Fdisc_array2 <- array(Fdisc, dim=c(nsim, nCALbins, length(allyrs)))
# pSLarray[,,allyrs] <- SLarray2[,,allyrs] * (retL_P[,,allyrs]+ (1-retL_P[,,allyrs])*Fdisc_array2)
#
# # Realised Retention curves
# retA_P[,,allyrs] <- retA_P[,,allyrs] * V_P[,,allyrs]
# retL_P[,,allyrs] <- retL_P[,,allyrs] * pSLarray[,,allyrs]
#
# }
#
#
# newVB <- apply(Biomass_P[, , y, ] * V_P[SAYt], c(1, 3), sum) # calculate total vuln biomass by area
# # fishdist <- (newVB^Spat_targ)/apply(newVB^Spat_targ, 1, mean) # spatial preference according to spatial vulnerable biomass
# fishdist <- (newVB^Spat_targ)/apply(newVB^Spat_targ, 1, sum) # spatial preference according to spatial vulnerable biomass
# Emult <- 1 + ((2/apply(fishdist * Si, 1, sum)) - 1) * Ai # allocate effort to new area according to fraction allocation Ai
#
# # fishing mortality with input control recommendation
# FM_P[SAYR] <- (FinF[S1] * Ei[S1] * V_P[SAYt] * Si[SR] * fishdist[SR] * Emult[S1] * qvar[SY1] * (qs[S1]*(1 + qinc[S1]/100)^y))/Asize[SR]
#
# # retained fishing mortality with input control recommendation
# FM_retain[SAYR] <- (FinF[S1] * Ei[S1] * retA_P[SAYt] * Si[SR] * fishdist[SR] * Emult[S1] * qvar[SY1] * qs[S1]*(1 + qinc[S1]/100)^y)/Asize[SR]
#
# VBiomass_P[SAYR] <- Biomass_P[SAYR] * V_P[SAYt] # update vulnerable biomass
# Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
#
# CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
# CB_Pret[SAYR] <- FM_retain[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
#
# out <- list()
# out$Z_P <- Z_P
# out$FM_P <- FM_P
# out$FM_retain <- FM_retain
# out$CB_P <- CB_P
# out$CB_Pret <- CB_Pret
# out$Effort <- Ei
# out$retA_P <- retA_P
# out$retL_P <- retL_P
# out$V_P <- V_P
# out$pSLarray <- pSLarray
# out$Si <- Si
# out$Ai <- Ai
# out
#
# }
| /R/popdyn.R | no_license | zanbi/DLMtool | R | false | false | 111,453 | r |
#' Calculate population dynamics from MP recommendation
#'
#' An internal function to calculate the population dynamics for the next time
#' step based on the recent MP recommendation
#'
#' @param MPRecs A named list of MP recommendations. The names are the same as `slotNames('Rec')`, except
#' for `Misc`. Each element in the list is a matrix. With the exception of `Spatial`, all elements in list
#' have `nrow=1` and `ncol=nsim`. `Spatial` has `nrow=nareas`. Matrices can be empty matrix, populated with all NAs
#' (both mean no change in management with respect to this element (e.g. `Effort`)), or populated with a recommendation.
#' MPs must either return a recommendation or no recommendation for every simulation for a particular slot (i.e. cannot have some NA and some values).
#' @param y The projection year
#' @param nyears The number of historical years
#' @param proyears The number of projection years
#' @param nsim The number of simulations
#' @param Biomass_P An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with total biomass in the projection years
#' @param VBiomass_P An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with vulnerable biomass in the projection years
#' @param LastTAE A vector of length `nsim` with the most recent TAE
#' @param LastSpatial A matrix of `nrow=nareas` and `ncol=nsim` with the most recent spatial management arrangements
#' @param LastAllocat A vector of length `nsim` with the most recent allocation
#' @param LastTAC A vector of length `nsim` with the most recent TAC
#' @param TACused A vector of length `nsim` with the most recent TAC
#' @param maxF A numeric value with maximum allowed F. From `OM@maxF`
#' @param LR5_P A matrix with `nyears+proyears` rows and `nsim` columns with the first length at 5 percent retention.
#' @param LFR_P A matrix with `nyears+proyears` rows and `nsim` columns with the first length at full retention.
#' @param Rmaxlen_P A matrix with `nyears+proyears` rows and `nsim` columns with the retention at maximum length.
#' @param retL_P An array with dimensions `nsim`, `nCALbins` and `nyears+proyears` with retention at length
#' @param retA_P An array with dimensions `nsim`, `maxage` and `nyears+proyears` with retention at age
#' @param L5_P A matrix with `nyears+proyears` rows and `nsim` columns with the first length at 5 percent selectivity
#' @param LFS_P A matrix with `nyears+proyears` rows and `nsim` columns with the first length at full selectivity
#' @param Vmaxlen_P A matrix with `nyears+proyears` rows and `nsim` columns with the selectivity at maximum length.
#' @param SLarray_P An array with dimensions `nsim`, `nCALbins` and `nyears+proyears` with selectivity at length
#' @param V_P An array with dimensions `nsim`, `maxage` and `nyears+proyears` with selectivity at age
#' @param Fdisc_P vector of length `nsim` with discard mortality. From `OM@Fdisc` but can be updated by MP (`Rec@Fdisc`)
#' @param DR_P A matrix with `nyears+proyears` rows and `nsim` columns with the fraction discarded.
#' @param M_ageArray An array with dimensions `nsim`, `maxage` and `nyears+proyears` with natural mortality at age
#' @param FM_P An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with total fishing mortality
#' @param FM_Pret An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with fishing mortality of the retained fish
#' @param Z_P An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with total mortality
#' @param CB_P An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with total catch
#' @param CB_Pret An array with dimensions `nsim`, `maxage`, `proyears`, and `nareas` with retained catch
#' @param TAC_f A matrix with `nsim` rows and `proyears` columns with the TAC implementation error
#' @param E_f A matrix with `nsim` rows and `proyears` columns with the effort implementation error
#' @param SizeLim_f A matrix with `nsim` rows and `proyears` columns with the size limit implementation error
#' @param FinF A numeric vector of length `nsim` with fishing mortality in the last historical year
#' @param Spat_targ A numeric vector of length `nsim` with spatial targeting
#' @param CAL_binsmid A numeric vector of length `nCALbins` with mid-points of the CAL bins
#' @param Linf A numeric vector of length `nsim` with Linf (from `Stock@Linf`)
#' @param Len_age An array with dimensions `nsim`, `maxage`, and `nyears+proyears` with length-at-age
#' @param maxage A numeric value with maximum age from `Stock@maxage`
#' @param nareas A numeric value with number of areas
#' @param Asize A matrix with `nsim` rows and `nareas` columns with the relative size of each area
#' @param nCALbins The number of CAL bins. Should be the same as `length(CAL_binsmid)`
#' @param qs A numeric vector of length `nsim` with catchability coefficient
#' @param qvar A matrix with `nsim` rows and `proyears` columns with catchability variability
#' @param qinc A numeric vector of length `nsim` with average annual change in catchability
#' @param Effort_pot A numeric vector of potential effort
#' @param checks Logical. Run internal checks? Currently not used.
#'
#' @return A named list with updated population dynamics
#' @author A. Hordyk
#' @export
#'
#' @keywords internal
CalcMPDynamics <- function(MPRecs, y, nyears, proyears, nsim, Biomass_P,
VBiomass_P,
LastTAE, histTAE, LastSpatial, LastAllocat, LastTAC,
TACused, maxF,
LR5_P, LFR_P, Rmaxlen_P, retL_P, retA_P,
L5_P, LFS_P, Vmaxlen_P, SLarray_P, V_P,
Fdisc_P, DR_P,
M_ageArray, FM_P, FM_Pret, Z_P, CB_P, CB_Pret,
TAC_f, E_f, SizeLim_f,
FinF, Spat_targ,
CAL_binsmid, Linf, Len_age, maxage, nareas, Asize, nCALbins,
qs, qvar, qinc,
Effort_pot,
checks=FALSE) {
# Effort
if (length(MPRecs$Effort) == 0) { # no max effort recommendation
if (y==1) TAE <- LastTAE * E_f[,y] # max effort is unchanged but has implementation error
if (y>1) TAE <- LastTAE / E_f[,y-1] * E_f[,y] # max effort is unchanged but has implementation error
} else if (length(MPRecs$Effort) != nsim) {
stop("Effort recommmendation is not 'nsim' long.\n Does MP return Effort recommendation under all conditions?")
} else {
# a maximum effort recommendation
if (!all(is.na(histTAE))) {
TAE <- histTAE * MPRecs$Effort * E_f[,y] # adjust existing TAE adjustment with implementation error
} else {
TAE <- MPRecs$Effort * E_f[,y] # adjust existing TAE adjustment with implementation error
}
}
# Spatial
if (all(is.na(MPRecs$Spatial))) { # no spatial recommendation
Si <- LastSpatial # spatial is unchanged
} else if (any(is.na(MPRecs$Spatial))) {
stop("Spatial recommmendation has some NAs.\n Does MP return Spatial recommendation under all conditions?")
} else {
Si <- MPRecs$Spatial # change spatial fishing
}
if (all(dim(Si) != c(nareas, nsim))) stop("Spatial recommmendation not nareas long")
# Allocation
if (length(MPRecs$Allocate) == 0) { # no allocation recommendation
Ai <- LastAllocat # allocation is unchanged
} else if (length(MPRecs$Allocate) != nsim) {
stop("Allocate recommmendation is not 'nsim' long.\n Does MP return Allocate recommendation under all conditions?")
} else {
Ai <- MPRecs$Allocate # change in spatial allocation
}
Ai <- as.numeric(Ai)
# Retention Curve
RetentFlag <- FALSE # should retention curve be updated for future years?
# LR5
if (length(MPRecs$LR5) == 0) { # no recommendation
LR5_P[(y + nyears):(nyears+proyears),] <- matrix(LR5_P[y + nyears-1,],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # unchanged
} else if (length(MPRecs$LR5) != nsim) {
stop("LR5 recommmendation is not 'nsim' long.\n Does MP return LR5 recommendation under all conditions?")
} else {
LR5_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$LR5 * SizeLim_f[,y],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # recommendation with implementation error
RetentFlag <- TRUE
}
# LFR
if (length(MPRecs$LFR) == 0) { # no recommendation
LFR_P[(y + nyears):(nyears+proyears),] <- matrix(LFR_P[y + nyears-1,],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # unchanged
} else if (length(MPRecs$LFR) != nsim) {
stop("LFR recommmendation is not 'nsim' long.\n Does MP return LFR recommendation under all conditions?")
} else {
LFR_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$LFR * SizeLim_f[,y],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # recommendation with implementation error
RetentFlag <- TRUE
}
# Rmaxlen
if (length(MPRecs$Rmaxlen) == 0) { # no recommendation
Rmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(Rmaxlen_P[y + nyears-1,],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # unchanged
} else if (length(MPRecs$Rmaxlen) != nsim) {
stop("Rmaxlen recommmendation is not 'nsim' long.\n Does MP return Rmaxlen recommendation under all conditions?")
} else {
Rmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$Rmaxlen,
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # recommendation
RetentFlag <- TRUE
}
# HS - harvest slot
if (length(MPRecs$HS) == 0) { # no recommendation
HS <- rep(1E5, nsim) # no harvest slot
} else if (length(MPRecs$HS) != nsim) {
stop("HS recommmendation is not 'nsim' long.\n Does MP return HS recommendation under all conditions?")
} else {
HS <- MPRecs$HS * SizeLim_f[,y] # recommendation
RetentFlag <- TRUE
}
# Selectivity Curve
SelectFlag <- FALSE # has selectivity been updated?
# L5
if (length(MPRecs$L5) == 0) { # no recommendation
L5_P[(y + nyears):(nyears+proyears),] <- matrix(L5_P[y + nyears-1,],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # unchanged
} else if (length(MPRecs$L5) != nsim) {
stop("L5 recommmendation is not 'nsim' long.\n Does MP return L5 recommendation under all conditions?")
} else {
L5_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$L5 * SizeLim_f[,y],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # recommendation with implementation error
SelectFlag <- TRUE
}
# LFS
if (length(MPRecs$LFS) == 0) { # no recommendation
LFS_P[(y + nyears):(nyears+proyears),] <- matrix(LFS_P[y + nyears-1,],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # unchanged
} else if (length(MPRecs$LFS) != nsim) {
stop("LFS recommmendation is not 'nsim' long.\n Does MP return LFS recommendation under all conditions?")
} else {
LFS_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$LFS * SizeLim_f[,y],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # recommendation with implementation error
SelectFlag <- TRUE
}
# Vmaxlen
if (length(MPRecs$Rmaxlen) == 0) { # no recommendation
Vmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(Vmaxlen_P[y + nyears-1,],
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # unchanged
} else if (length(MPRecs$Rmaxlen) != nsim) {
stop("Rmaxlen recommmendation is not 'nsim' long.\n Does MP return Rmaxlen recommendation under all conditions?")
} else {
Vmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$Vmaxlen,
nrow=(length((y + nyears):(nyears+proyears))),
ncol=nsim, byrow=TRUE) # recommendation
SelectFlag <- TRUE
}
# Discard Mortality
if (length(MPRecs$Fdisc) >0) { # Fdisc has changed
if (length(MPRecs$Fdisc) != nsim) stop("Fdisc recommmendation is not 'nsim' long.\n Does MP return Fdisc recommendation under all conditions?")
Fdisc_P <- MPRecs$Fdisc
}
# Discard Ratio
if (length(MPRecs$DR)>0) { # DR has changed
if (length(MPRecs$DR) != nsim) stop("DR recommmendation is not 'nsim' long.\n Does MP return DR recommendation under all conditions?")
DR_P[(y+nyears):(nyears+proyears),] <- matrix(MPRecs$DR, nrow=length((y+nyears):(nyears+proyears)), ncol=nsim, byrow=TRUE)
}
# Update Selectivity and Retention Curve
if (SelectFlag | RetentFlag) {
yr <- y+nyears
allyrs <- (y+nyears):(nyears+proyears) # update vulnerabilty for all future years
srs <- (Linf - LFS_P[yr,]) / ((-log(Vmaxlen_P[yr,],2))^0.5) # descending limb
srs[!is.finite(srs)] <- Inf
sls <- (LFS_P[yr,] - L5_P[yr,]) / ((-log(0.05,2))^0.5) # ascending limb
CAL_binsmidMat <- matrix(CAL_binsmid, nrow=nsim, ncol=length(CAL_binsmid), byrow=TRUE)
selLen <- t(sapply(1:nsim, getsel, lens=CAL_binsmidMat, lfs=LFS_P[yr,], sls=sls, srs=srs))
for (yy in allyrs) {
# calculate new selectivity at age curve
V_P[ , , yy] <- t(sapply(1:nsim, getsel, lens=Len_age[,,yy], lfs=LFS_P[yy,], sls=sls, srs=srs))
SLarray_P[,, yy] <- selLen # calculate new selectivity at length curve
}
# sim <- 158
# plot(CAL_binsmid, selLen[sim,], type="b")
# lines(c(L5_P[yr,sim], L5_P[yr,sim]), c(0, 0.05), lty=2)
# lines(c(LFS_P[yr,sim], LFS_P[yr,sim]), c(0, 1), lty=2)
# lines(c(Linf[sim], Linf[sim]), c(0, Vmaxlen_P[yr,sim]), lty=2)
# calculate new retention curve
yr <- y+nyears
allyrs <- (y+nyears):(nyears+proyears) # update vulnerabilty for all future years
srs <- (Linf - LFR_P[yr,]) / ((-log(Rmaxlen_P[yr,],2))^0.5) # selectivity parameters are constant for all years
srs[!is.finite(srs)] <- Inf
sls <- (LFR_P[yr,] - LR5_P[yr,]) / ((-log(0.05,2))^0.5)
CAL_binsmidMat <- matrix(CAL_binsmid, nrow=nsim, ncol=length(CAL_binsmid), byrow=TRUE)
relLen <- t(sapply(1:nsim, getsel, lens=CAL_binsmidMat, lfs=LFR_P[yr,], sls=sls, srs=srs))
for (yy in allyrs) {
# calculate new retention at age curve
retA_P[ , , yy] <- t(sapply(1:nsim, getsel, lens=Len_age[,,yy], lfs=LFR_P[yy,], sls=sls, srs=srs))
retL_P[,, yy] <- relLen # calculate new retention at length curve
}
# upper harvest slot
aboveHS <- Len_age[,,allyrs, drop=FALSE]>array(HS, dim=c(nsim, maxage, length(allyrs)))
tretA_P <- retA_P[,,allyrs]
tretA_P[aboveHS] <- 0
retA_P[,,allyrs] <- tretA_P
for (ss in 1:nsim) {
index <- which(CAL_binsmid >= HS[ss])
retL_P[ss, index, allyrs] <- 0
}
dr <- aperm(abind::abind(rep(list(DR_P), maxage), along=3), c(2,3,1))
retA_P[,,allyrs] <- (1-dr[,,yr]) * retA_P[,,yr]
dr <- aperm(abind::abind(rep(list(DR_P), nCALbins), along=3), c(2,3,1))
retL_P[,,allyrs] <- (1-dr[,,yr]) * retL_P[,,yr]
# update realized vulnerablity curve with retention and dead discarded fish
Fdisc_array1 <- array(Fdisc_P, dim=c(nsim, maxage, length(allyrs)))
V_P[,,allyrs] <- V_P[,,allyrs, drop=FALSE] * (retA_P[,,allyrs, drop=FALSE] + (1-retA_P[,,allyrs, drop=FALSE])*Fdisc_array1)
Fdisc_array2 <- array(Fdisc_P, dim=c(nsim, nCALbins, length(allyrs)))
SLarray_P[,,allyrs] <- SLarray_P[,,allyrs, drop=FALSE] * (retL_P[,,allyrs, drop=FALSE]+ (1-retL_P[,,allyrs, drop=FALSE])*Fdisc_array2)
# Realised Retention curves
retA_P[,,allyrs] <- retA_P[,,allyrs] * V_P[,,allyrs]
retL_P[,,allyrs] <- retL_P[,,allyrs] * SLarray_P[,,allyrs]
}
CurrentB <- Biomass_P[,,y,] # biomass at the beginning of year
CurrentVB <- array(NA, dim=dim(CurrentB))
Catch_tot <- Catch_retain <- array(NA, dim=dim(CurrentB)) # catch this year arrays
FMc <- Zc <- array(NA, dim=dim(CurrentB)) # fishing and total mortality this year
# indices
SAYRL <- as.matrix(expand.grid(1:nsim, 1:maxage, nyears, 1:nareas)) # Final historical year
SAYRt <- as.matrix(expand.grid(1:nsim, 1:maxage, y + nyears, 1:nareas)) # Trajectory year
SAYR <- as.matrix(expand.grid(1:nsim, 1:maxage, y, 1:nareas))
SAR <- SAYR[, c(1,2,4)]
SAY <- SAYR[,c(1:3)]
SYt <- SAYRt[, c(1, 3)]
SAYt <- SAYRt[, 1:3]
SR <- SAYR[, c(1, 4)]
SA1 <- SAYR[, 1:2]
S1 <- SAYR[, 1]
SY1 <- SAYR[, c(1, 3)]
SAY1 <- SAYR[, 1:3]
SYA <- as.matrix(expand.grid(1:nsim, 1, 1:maxage)) # Projection year
SY <- SYA[, 1:2]
SA <- SYA[, c(1, 3)]
S <- SYA[, 1]
CurrentVB[SAR] <- CurrentB[SAR] * V_P[SAYt] # update available biomass if selectivity has changed
# Calculate fishing distribution if all areas were open
newVB <- apply(CurrentVB, c(1,3), sum) # calculate total vuln biomass by area
fishdist <- (newVB^Spat_targ)/apply(newVB^Spat_targ, 1, sum) # spatial preference according to spatial vulnerable biomass
d1 <- t(Si) * fishdist # distribution of fishing effort
fracE <- apply(d1, 1, sum) # fraction of current effort in open areas
fracE2 <- d1 * (fracE + (1-fracE) * Ai)/fracE # re-distribution of fishing effort accounting for re-allocation of effort
fishdist <- fracE2 # fishing effort by area
# ---- no TAC - calculate F with bio-economic effort ----
if (all(is.na(TACused))) {
if (all(is.na(Effort_pot)) & all(is.na(TAE))) Effort_pot <- rep(1, nsim) # historical effort
if (all(is.na(Effort_pot))) Effort_pot <- TAE[1,]
# fishing mortality with bio-economic effort
FM_P[SAYR] <- (FinF[S1] * Effort_pot[S1] * V_P[SAYt] * t(Si)[SR] * fishdist[SR] *
qvar[SY1] * (qs[S1]*(1 + qinc[S1]/100)^y))/Asize[SR]
# retained fishing mortality with bio-economic effort
FM_Pret[SAYR] <- (FinF[S1] * Effort_pot[S1] * retA_P[SAYt] * t(Si)[SR] * fishdist[SR] *
qvar[SY1] * qs[S1]*(1 + qinc[S1]/100)^y)/Asize[SR]
}
# ---- calculate required F and effort for TAC recommendation ----
if (!all(is.na(TACused))) { # a TAC has been set
# if MP returns NA - TAC is set to TAC from last year
TACused[is.na(TACused)] <- LastTAC[is.na(TACused)]
TACusedE <- TAC_f[,y]*TACused # TAC taken after implementation error
# Calculate total vulnerable biomass available mid-year accounting for any changes in selectivity &/or spatial closures
M_array <- array(0.5*M_ageArray[,,nyears+y], dim=c(nsim, maxage, nareas))
Atemp <- apply(CurrentVB * exp(-M_array), c(1,3), sum) # mid-year before fishing
availB <- apply(Atemp * t(Si), 1, sum) # adjust for spatial closures
# Calculate total F (using Steve Martell's approach http://api.admb-project.org/baranov_8cpp_source.html)
expC <- TACusedE
expC[TACusedE> availB] <- availB[TACusedE> availB] * 0.99
Ftot <- sapply(1:nsim, calcF, expC, V_P, Biomass_P, fishdist, Asize, maxage, nareas,
M_ageArray,nyears, y)
# apply max F constraint
Ftot[Ftot<0] <- maxF
Ftot[!is.finite(Ftot)] <- maxF
Ftot[Ftot>maxF] <- maxF
# Calculate F & Z by age class
FM_P[SAYR] <- Ftot[S] * V_P[SAYt] * fishdist[SR]/Asize[SR]
FM_Pret[SAYR] <- Ftot[S] * retA_P[SAYt] * fishdist[SR]/Asize[SR]
Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
# Calculate total and retained catch
CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * (1-exp(-Z_P[SAYR])) * Biomass_P[SAYR]
CB_Pret[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * (1-exp(-Z_P[SAYR])) * Biomass_P[SAYR]
# Calculate total removals when CB_Pret == TAC - total removal > retained when discarding
actualremovals <- apply(CB_P[,,y,], 1, sum)
retained <- apply(CB_Pret[,,y,], 1, sum)
ratio <- actualremovals/retained # ratio of actual removals to retained catch
ratio[!is.finite(ratio)] <- 0
ratio[ratio>1E5] <- 1E5
temp <- CB_Pret[,,y,]/apply(CB_Pret[,,y,], 1, sum) # distribution by age & area of retained fish
Catch_retain <- TACusedE * temp # retained catch
Catch_tot <- CB_P[,,y,]/apply(CB_P[,,y,], 1, sum) # distribution by age & area of caught fish
temp <- Catch_tot/apply(Catch_tot, 1, sum) # distribution of removals
Catch_tot <- TACusedE * ratio * temp # scale up total removals (if applicable)
# total removals can't be more than available biomass
chk <- apply(Catch_tot, 1, sum) > availB
if (sum(chk)>0) {
c_temp <- apply(Catch_tot[chk,,, drop=FALSE], 1, sum)
ratio_temp <- (availB[chk]/c_temp) * 0.99
# scale total catches to 0.99 available biomass
if (sum(chk)>1) Catch_tot[chk,, ] <- Catch_tot[chk,,] * array(ratio_temp, dim=c(sum(chk), maxage, nareas))
if (sum(chk)==1) Catch_tot[chk,, ] <- Catch_tot[chk,,] * array(ratio_temp, dim=c(maxage, nareas))
}
# check where actual catches are higher than TAC due to discarding (with imp error)
ind <- which(apply(Catch_tot, 1, sum) > TACusedE)
if (length(ind)>0) {
# update Ftot calcs
Ftot[ind] <- sapply(ind, calcF, TACusedE, V_P, Biomass_P, fishdist, Asize,
maxage, nareas, M_ageArray,nyears, y)
}
# Calculate F & Z by age class
FM_P[SAYR] <- Ftot[S] * V_P[SAYt] * fishdist[SR]/Asize[SR]
FM_Pret[SAYR] <- Ftot[S] * retA_P[SAYt] * fishdist[SR]/Asize[SR]
Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
}
# Apply maxF constraint
FM_P[SAYR][FM_P[SAYR] > maxF] <- maxF
FM_Pret[SAYR][FM_Pret[SAYR] > maxF] <- maxF
Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
# Update catches after maxF constraint
CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * (1-exp(-Z_P[SAYR])) * Biomass_P[SAYR]
CB_Pret[SAYR] <- FM_Pret[SAYR]/Z_P[SAYR] * (1-exp(-Z_P[SAYR])) * Biomass_P[SAYR]
# Effort_req - effort required to catch TAC
# Effort_pot - potential effort this year (active fishers) from bio-economic model
# Effort_act - actual effort this year
# TAE - maximum actual effort limit
# Effort_act < Effort_pot if Effort_req < Effort_pot
# Calculate total F (using Steve Martell's approach http://api.admb-project.org/baranov_8cpp_source.html)
totalCatch <- apply(CB_P[,,y,], 1, sum)
Ftot <- sapply(1:nsim, calcF, totalCatch, V_P, Biomass_P, fishdist, Asize, maxage, nareas,
M_ageArray,nyears, y)
# Effort relative to last historical with this potential catch
Effort_req <- Ftot/(FinF * qs*qvar[,y]* (1 + qinc/100)^y) * apply(fracE2, 1, sum) # effort required for this catch
# Limit effort to potential effort from bio-economic model
Effort_act <- Effort_req
if (!all(is.na(Effort_pot))) {
excessEff <- Effort_req>Effort_pot # simulations where required effort > potential effort
Effort_act[excessEff] <- Effort_pot[excessEff] # actual effort can't be more than bio-economic effort
}
# Limit actual effort <= TAE
if (!all(is.na(TAE))) { # a TAE exists
Effort_act[Effort_act>TAE] <- TAE[Effort_act>TAE]
}
Effort_act[Effort_act<=0] <- tiny
# --- Re-calculate catch given actual effort ----
# fishing mortality with actual effort
FM_P[SAYR] <- (FinF[S1] * Effort_act[S1] * V_P[SAYt] * t(Si)[SR] * fishdist[SR] *
qvar[SY1] * (qs[S1]*(1 + qinc[S1]/100)^y))/Asize[SR]
# retained fishing mortality with actual effort
FM_Pret[SAYR] <- (FinF[S1] * Effort_act[S1] * retA_P[SAYt] * t(Si)[SR] * fishdist[SR] *
qvar[SY1] * qs[S1]*(1 + qinc[S1]/100)^y)/Asize[SR]
# Apply maxF constraint
FM_P[SAYR][FM_P[SAYR] > maxF] <- maxF
FM_Pret[SAYR][FM_Pret[SAYR] > maxF] <- maxF
Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
# Update catches after maxF constraint
CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * (1-exp(-Z_P[SAYR])) * Biomass_P[SAYR]
CB_Pret[SAYR] <- FM_Pret[SAYR]/Z_P[SAYR] * (1-exp(-Z_P[SAYR])) * Biomass_P[SAYR]
# Calculate total F (using Steve Martell's approach http://api.admb-project.org/baranov_8cpp_source.html)
totalCatch <- apply(CB_P[,,y,], 1, sum)
Ftot <- sapply(1:nsim, calcF, totalCatch, V_P, Biomass_P, fishdist, Asize, maxage, nareas,
M_ageArray,nyears, y) # update if effort has changed
# Returns
out <- list()
out$TACrec <- TACused
out$V_P <- V_P
out$SLarray_P <- SLarray_P
out$retA_P <- retA_P
out$retL_P <- retL_P
out$Fdisc_P <- Fdisc_P
out$VBiomass_ <- VBiomass_P
out$Z_P <- Z_P
out$FM_P <- FM_P
out$FM_Pret <- FM_Pret
out$CB_P <- CB_P
out$CB_Pret <- CB_Pret
out$Si <- Si
out$Ai <- Ai
out$TAE <- TAE
out$Effort <- Effort_act # actual effort this year
out$Ftot <- Ftot
out
}
# if (length(MPRecs$Effort) >0 | all(Ei != 1)) { # an effort regulation also exists
# #Make sure Effort doesn't exceed regulated effort
# aboveE <- which(Effort > Ei)
# if (length(aboveE)>0) {
# Effort[aboveE] <- Ei[aboveE] * apply(fracE2, 1, sum)[aboveE]
# SAYR <- as.matrix(expand.grid(aboveE, 1:maxage, y, 1:nareas))
# SAYRt <- as.matrix(expand.grid(aboveE, 1:maxage, y + nyears, 1:nareas)) # Trajectory year
# SYt <- SAYRt[, c(1, 3)]
# SAYt <- SAYRt[, 1:3]
# SR <- SAYR[, c(1, 4)]
# S1 <- SAYR[, 1]
# SY1 <- SAYR[, c(1, 3)]
# FM_P[SAYR] <- (FinF[S1] * Ei[S1] * V_P[SAYt] * t(Si)[SR] * fishdist[SR] * qvar[SY1] *
# (qs[S1]*(1 + qinc[S1]/100)^y))/Asize[SR]
#
# # retained fishing mortality with input control recommendation
# FM_Pret[SAYR] <- (FinF[S1] * Ei[S1] * retA_P[SAYt] * t(Si)[SR] * fishdist[SR] *
# qvar[SY1] * qs[S1]*(1 + qinc[S1]/100)^y)/Asize[SR]
#
# Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
# CB_P[SAYR] <- (1-exp(-FM_P[SAYR])) * (Biomass_P[SAYR] * exp(-0.5*M_ageArray[SAYt]))
# CB_Pret[SAYR] <- (1-exp(-FM_Pret[SAYR])) * (Biomass_P[SAYR] * exp(-0.5*M_ageArray[SAYt]))
# }
# }
# CalcMPDynamics <- function(MPRecs, y, nyears, proyears, nsim,
# LastEi, LastSpatial, LastAllocat, LastCatch,
# TACused, maxF,
# LR5_P, LFR_P, Rmaxlen_P, retL_P, retA_P,
# L5_P, LFS_P, Vmaxlen_P, SLarray_P, V_P,
# Fdisc_P, DR_P,
# M_ageArray, FM_P, FM_Pret, Z_P, CB_P, CB_Pret,
# TAC_f, E_f, SizeLim_f,
# VBiomass_P, Biomass_P, FinF, Spat_targ,
# CAL_binsmid, Linf, Len_age, maxage, nareas, Asize, nCALbins,
# qs, qvar, qinc) {
# # Change in Effort
# if (length(MPRecs$Effort) == 0) { # no effort recommendation
# if (y==1) Ei <- LastEi * E_f[,y] # effort is unchanged but has implementation error
# if (y>1) Ei <- LastEi / E_f[,y-1] * E_f[,y] # effort is unchanged but has implementation error
# } else if (length(MPRecs$Effort) != nsim) {
# stop("Effort recommmendation is not 'nsim' long.\n Does MP return Effort recommendation under all conditions?")
# } else {
# Ei <- MPRecs$Effort * E_f[,y] # effort adjustment with implementation error
# }
#
# # Spatial
# if (all(is.na(MPRecs$Spatial))) { # no spatial recommendation
# Si <- LastSpatial # spatial is unchanged
# } else if (any(is.na(MPRecs$Spatial))) {
# stop("Spatial recommmendation has some NAs.\n Does MP return Spatial recommendation under all conditions?")
# } else {
# Si <- MPRecs$Spatial # change spatial fishing
# }
# if (all(dim(Si) != c(nareas, nsim))) stop("Spatial recommmendation not nareas long")
#
# # Allocation
# if (length(MPRecs$Allocate) == 0) { # no allocation recommendation
# Ai <- LastAllocat # allocation is unchanged
# } else if (length(MPRecs$Allocate) != nsim) {
# stop("Allocate recommmendation is not 'nsim' long.\n Does MP return Allocate recommendation under all conditions?")
# } else {
# Ai <- MPRecs$Allocate # change in spatial allocation
# }
# Ai <- as.numeric(Ai)
#
# # Retention Curve
# RetentFlag <- FALSE # should retention curve be updated for future years?
# # LR5
# if (length(MPRecs$LR5) == 0) { # no recommendation
# LR5_P[(y + nyears):(nyears+proyears),] <- matrix(LR5_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
#
# } else if (length(MPRecs$LR5) != nsim) {
# stop("LR5 recommmendation is not 'nsim' long.\n Does MP return LR5 recommendation under all conditions?")
# } else {
# LR5_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$LR5 * SizeLim_f[,y],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation with implementation error
# RetentFlag <- TRUE
# }
# # LFR
# if (length(MPRecs$LFR) == 0) { # no recommendation
# LFR_P[(y + nyears):(nyears+proyears),] <- matrix(LFR_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
# } else if (length(MPRecs$LFR) != nsim) {
# stop("LFR recommmendation is not 'nsim' long.\n Does MP return LFR recommendation under all conditions?")
# } else {
# LFR_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$LFR * SizeLim_f[,y],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation with implementation error
# RetentFlag <- TRUE
# }
# # Rmaxlen
# if (length(MPRecs$Rmaxlen) == 0) { # no recommendation
# Rmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(Rmaxlen_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
#
# } else if (length(MPRecs$Rmaxlen) != nsim) {
# stop("Rmaxlen recommmendation is not 'nsim' long.\n Does MP return Rmaxlen recommendation under all conditions?")
# } else {
# Rmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$Rmaxlen,
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation
# RetentFlag <- TRUE
# }
#
#
# # HS - harvest slot
# if (length(MPRecs$HS) == 0) { # no recommendation
# HS <- rep(1E5, nsim) # no harvest slot
# } else if (length(MPRecs$HS) != nsim) {
# stop("HS recommmendation is not 'nsim' long.\n Does MP return HS recommendation under all conditions?")
# } else {
# HS <- MPRecs$HS * SizeLim_f[,y] # recommendation
# RetentFlag <- TRUE
# }
#
# # Selectivity Curve
# SelectFlag <- FALSE # has selectivity been updated?
# # L5
# if (length(MPRecs$L5) == 0) { # no recommendation
# L5_P[(y + nyears):(nyears+proyears),] <- matrix(L5_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
#
# } else if (length(MPRecs$L5) != nsim) {
# stop("L5 recommmendation is not 'nsim' long.\n Does MP return L5 recommendation under all conditions?")
# } else {
# L5_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$L5 * SizeLim_f[,y],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation with implementation error
# SelectFlag <- TRUE
# }
# # LFS
# if (length(MPRecs$LFS) == 0) { # no recommendation
# LFS_P[(y + nyears):(nyears+proyears),] <- matrix(LFS_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
# } else if (length(MPRecs$LFS) != nsim) {
# stop("LFS recommmendation is not 'nsim' long.\n Does MP return LFS recommendation under all conditions?")
# } else {
# LFS_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$LFS * SizeLim_f[,y],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation with implementation error
# SelectFlag <- TRUE
# }
# # Vmaxlen
# if (length(MPRecs$Rmaxlen) == 0) { # no recommendation
# Vmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(Vmaxlen_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
#
# } else if (length(MPRecs$Rmaxlen) != nsim) {
# stop("Rmaxlen recommmendation is not 'nsim' long.\n Does MP return Rmaxlen recommendation under all conditions?")
# } else {
# Vmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(MPRecs$Vmaxlen,
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation
# SelectFlag <- TRUE
# }
#
# # Discard Mortality
# if (length(MPRecs$Fdisc) >0) { # Fdisc has changed
# if (length(MPRecs$Fdisc) != nsim) stop("Fdisc recommmendation is not 'nsim' long.\n Does MP return Fdisc recommendation under all conditions?")
# Fdisc_P <- MPRecs$Fdisc
# }
#
# # Discard Ratio
# if (length(MPRecs$DR)>0) { # DR has changed
# if (length(MPRecs$DR) != nsim) stop("DR recommmendation is not 'nsim' long.\n Does MP return DR recommendation under all conditions?")
# DR_P[(y+nyears):(nyears+proyears),] <- matrix(MPRecs$DR, nrow=length((y+nyears):(nyears+proyears)), ncol=nsim, byrow=TRUE)
# }
#
# # Update Selectivity and Retention Curve
# if (SelectFlag | RetentFlag) {
# yr <- y+nyears
# allyrs <- (y+nyears):(nyears+proyears) # update vulnerabilty for all future years
#
# srs <- (Linf - LFS_P[yr,]) / ((-log(Vmaxlen_P[yr,],2))^0.5) # descending limb
# srs[!is.finite(srs)] <- Inf
# sls <- (LFS_P[yr,] - L5_P[yr,]) / ((-log(0.05,2))^0.5) # ascending limb
#
# CAL_binsmidMat <- matrix(CAL_binsmid, nrow=nsim, ncol=length(CAL_binsmid), byrow=TRUE)
# selLen <- t(sapply(1:nsim, getsel, lens=CAL_binsmidMat, lfs=LFS_P[yr,], sls=sls, srs=srs))
#
# for (yy in allyrs) {
# # calculate new selectivity at age curve
# V_P[ , , yy] <- t(sapply(1:nsim, getsel, lens=Len_age[,,yy], lfs=LFS_P[yy,], sls=sls, srs=srs))
#
# # calculate new selectivity at length curve
# SLarray_P[,, yy] <- selLen
# }
#
# # sim <- 158
# # plot(CAL_binsmid, selLen[sim,], type="b")
# # lines(c(L5_P[yr,sim], L5_P[yr,sim]), c(0, 0.05), lty=2)
# # lines(c(LFS_P[yr,sim], LFS_P[yr,sim]), c(0, 1), lty=2)
# # lines(c(Linf[sim], Linf[sim]), c(0, Vmaxlen_P[yr,sim]), lty=2)
#
# # calculate new retention curve
# yr <- y+nyears
# allyrs <- (y+nyears):(nyears+proyears) # update vulnerabilty for all future years
#
# srs <- (Linf - LFR_P[yr,]) / ((-log(Rmaxlen_P[yr,],2))^0.5) # selectivity parameters are constant for all years
# srs[!is.finite(srs)] <- Inf
# sls <- (LFR_P[yr,] - LR5_P[yr,]) / ((-log(0.05,2))^0.5)
#
# CAL_binsmidMat <- matrix(CAL_binsmid, nrow=nsim, ncol=length(CAL_binsmid), byrow=TRUE)
# relLen <- t(sapply(1:nsim, getsel, lens=CAL_binsmidMat, lfs=LFR_P[yr,], sls=sls, srs=srs))
#
# for (yy in allyrs) {
# # calculate new retention at age curve
# retA_P[ , , yy] <- t(sapply(1:nsim, getsel, lens=Len_age[,,yy], lfs=LFR_P[yy,], sls=sls, srs=srs))
#
# # calculate new retention at length curve
# retL_P[,, yy] <- relLen
# }
#
# # upper harvest slot
# aboveHS <- Len_age[,,allyrs, drop=FALSE]>array(HS, dim=c(nsim, maxage, length(allyrs)))
# tretA_P <- retA_P[,,allyrs]
# tretA_P[aboveHS] <- 0
# retA_P[,,allyrs] <- tretA_P
# for (ss in 1:nsim) {
# index <- which(CAL_binsmid >= HS[ss])
# retL_P[ss, index, allyrs] <- 0
# }
#
# dr <- aperm(abind::abind(rep(list(DR_P), maxage), along=3), c(2,3,1))
# retA_P[,,allyrs] <- (1-dr[,,yr]) * retA_P[,,yr]
# dr <- aperm(abind::abind(rep(list(DR_P), nCALbins), along=3), c(2,3,1))
# retL_P[,,allyrs] <- (1-dr[,,yr]) * retL_P[,,yr]
#
# # update realized vulnerablity curve with retention and dead discarded fish
# Fdisc_array1 <- array(Fdisc_P, dim=c(nsim, maxage, length(allyrs)))
#
# V_P[,,allyrs] <- V_P[,,allyrs, drop=FALSE] * (retA_P[,,allyrs, drop=FALSE] + (1-retA_P[,,allyrs, drop=FALSE])*Fdisc_array1)
#
# Fdisc_array2 <- array(Fdisc_P, dim=c(nsim, nCALbins, length(allyrs)))
# SLarray_P[,,allyrs] <- SLarray_P[,,allyrs, drop=FALSE] * (retL_P[,,allyrs, drop=FALSE]+ (1-retL_P[,,allyrs, drop=FALSE])*Fdisc_array2)
#
# # Realised Retention curves
# retA_P[,,allyrs] <- retA_P[,,allyrs] * V_P[,,allyrs]
# retL_P[,,allyrs] <- retL_P[,,allyrs] * SLarray_P[,,allyrs]
# }
#
# # indices
# SAYRL <- as.matrix(expand.grid(1:nsim, 1:maxage, nyears, 1:nareas)) # Final historical year
# SAYRt <- as.matrix(expand.grid(1:nsim, 1:maxage, y + nyears, 1:nareas)) # Trajectory year
# SAYR <- as.matrix(expand.grid(1:nsim, 1:maxage, y, 1:nareas))
# SYt <- SAYRt[, c(1, 3)]
# SAYt <- SAYRt[, 1:3]
# SR <- SAYR[, c(1, 4)]
# SA1 <- SAYR[, 1:2]
# S1 <- SAYR[, 1]
# SY1 <- SAYR[, c(1, 3)]
# SAY1 <- SAYR[, 1:3]
# SYA <- as.matrix(expand.grid(1:nsim, 1, 1:maxage)) # Projection year
# SY <- SYA[, 1:2]
# SA <- SYA[, c(1, 3)]
# SAY <- SYA[, c(1, 3, 2)]
# S <- SYA[, 1]
#
# # update vulnerable biomass for selectivitity curve
# VBiomass_P[SAYR] <- Biomass_P[SAYR] * V_P[SAYt] # update vulnerable biomass
#
# # Calculate fishing distribution if all areas were open
# newVB <- apply(VBiomass_P[,,y,], c(1,3), sum) # calculate total vuln biomass by area
# fishdist <- (newVB^Spat_targ)/apply(newVB^Spat_targ, 1, sum) # spatial preference according to spatial vulnerable biomass
#
# d1 <- t(Si) * fishdist # distribution of fishing effort
# fracE <- apply(d1, 1, sum) # fraction of current effort in open areas
# fracE2 <- d1 * (fracE + (1-fracE) * Ai)/fracE # re-distribution of fishing effort
#
# fishdist <- fracE2 # fishing effort by area
#
# # Apply TAC recommendation
# if (all(is.na(TACused))) { # no TAC has been set
#
# # fishing mortality with effort control recommendation
# FM_P[SAYR] <- (FinF[S1] * Ei[S1] * V_P[SAYt] * t(Si)[SR] * fishdist[SR] *
# qvar[SY1] * (qs[S1]*(1 + qinc[S1]/100)^y))/Asize[SR]
#
# # retained fishing mortality with effort control recommendation
# FM_Pret[SAYR] <- (FinF[S1] * Ei[S1] * retA_P[SAYt] * t(Si)[SR] * fishdist[SR] *
# qvar[SY1] * qs[S1]*(1 + qinc[S1]/100)^y)/Asize[SR]
#
# Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
#
# CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
# CB_Pret[SAYR] <- FM_Pret[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
#
# Effort <- FinF *Ei * apply(fracE2, 1, sum) # (Fs/qs)/ FinF # Ei # (Fs/qs)/ FinF change in catchability not included in effort calc: * qvar[,y] * ((1 + qinc/100)^y))
#
# } else { # A TAC has been set
#
#
# TACused[is.na(TACused)] <- LastCatch[is.na(TACused)] # if MP returns NA - TAC is set to catch from last year
# TACrec <- TACused # TAC recommendation
# TACusedE<- TAC_f[,y]*TACused # TAC taken after implementation error
#
# availB <- apply(newVB * t(Si), 1, sum)
#
# # maxC <- (1 - exp(-maxF)) * availB # maximum catch given maxF
# # TACusedE[TACusedE > maxC] <- maxC[TACusedE > maxC] # apply maxF limit - catch can't be higher than maxF * vulnerable biomass
#
# CB_P[SAYR] <- (Biomass_P[SAYR] * V_P[SAYt] * fishdist[SR])/Asize[SR] # ignore magnitude of effort or q increase (just get distribution across age and fishdist across space
# # calculate distribution of retained effort
# CB_Pret[SAYR] <- (Biomass_P[SAYR] * retA_P[SAYt] * fishdist[SR])/Asize[SR] # ignore magnitude of effort or q increase (just get distribution across age and fishdist across space
#
# retained <- apply(CB_Pret[,,y,], 1, sum)
# actualremovals <- apply(CB_P[,,y,], 1, sum)
# ratio <- actualremovals/retained # ratio of actual removals to retained catch
# ratio[!is.finite(ratio)] <- 0
# ratio[ratio>1E5] <- 1E5
# temp <- CB_Pret[, , y, ]/apply(CB_Pret[, , y, ], 1, sum) # distribution of retained fish
# CB_Pret[, , y, ] <- TACusedE * temp # retained catch
#
# temp <- CB_P[, , y, ]/apply(CB_P[, , y, ], 1, sum) # distribution of removals
#
# CB_P[,,y,] <- TACusedE * ratio * temp # scale up total removals
#
# chk <- apply(CB_P[,,y,], 1, sum) > availB # total removals can't be more than available biomass
# if (sum(chk)>0) {
# c_temp <- apply(CB_P[chk,,y,, drop=FALSE], 1, sum)
# ratio_temp <- (availB[chk]/c_temp) * 0.99
# if (sum(chk)>1) CB_P[chk,,y, ] <- CB_P[chk,,y,] * array(ratio_temp, dim=c(sum(chk), maxage, nareas))
# if (sum(chk)==1) CB_P[chk,,y, ] <- CB_P[chk,,y,] * array(ratio_temp, dim=c(maxage, nareas))
# }
#
# # temp <- CB_P[SAYR]/(Biomass_P[SAYR] * exp(-M_ageArray[SAYt]/2)) # Pope's approximation
# # temp[temp > (1 - exp(-maxF))] <- 1 - exp(-maxF) # apply maxF constraint
# # FM_P[SAYR] <- -log(1 - temp)
#
# # Calculate F by age class and area & apply maxF constraint
# temp1 <- sapply(1:nsim, function(sim)
# CalculateF(CB_P[sim,,y,], M_ageArray[sim,,y], V_P[sim,,y], Biomass_P[sim,,y,], maxF=maxF, byage=TRUE))
#
# temp <- as.vector(aperm(temp1, c(2,1)))
#
# FM_P[SAYR] <- temp
# Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
# # update removals with maxF constraint
# CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
#
# # t2 <- apply(CB_P[,,y,],1, sum)
#
# Fs <- sapply(1:nsim, function(sim)
# CalculateF(Catch_age_area=CB_P[sim,,y,], M_at_Age=M_ageArray[sim,,y],
# Vuln_age=V_P[sim,,y], B_age_area=Biomass_P[sim,,y,],
# maxF=maxF, byage=FALSE))
# Fs/FMSY
# apply(CB_P[,,y,], 1, sum)
# TACused
#
#
# Data@OM$A[x] * (1-exp(-Fs[x]))
# Data@OM$A[x] * (1-exp(-FMSY[x]))
#
#
# # repeated because of approximation error in Pope's approximation - an issue if CB_P ~ AvailB
# # chk <- apply(CB_P[,,y,], 1, sum) > availB # total removals can't be more than available biomass
# #
# # if (sum(chk)>0) {
# # c_temp <- apply(CB_P[chk,,y,, drop=FALSE], 1, sum)
# # ratio_temp <- (availB[chk]/c_temp) * 0.99
# # if (sum(chk)>1) CB_P[chk,,y, ] <- CB_P[chk,,y,] * array(ratio_temp, dim=c(sum(chk), maxage, nareas))
# # if (sum(chk)==1) CB_P[chk,,y, ] <- CB_P[chk,,y,] * array(ratio_temp, dim=c(maxage, nareas))
# # }
#
# # retained catch
# # temp <- CB_Pret[SAYR]/(Biomass_P[SAYR] * exp(-M_ageArray[SAYt]/2)) # Pope's approximation
# # temp[temp > (1 - exp(-maxF))] <- 1 - exp(-maxF) # apply maxF constraint
# # FM_Pret[SAYR] <- -log(1 - temp)
#
# # retained catch with maxF constraint
# temp1 <- sapply(1:nsim, function(sim)
# CalculateF(CB_Pret[sim,,y,], M_ageArray[sim,,y], V_P[sim,,y], Biomass_P[sim,,y,], maxF=maxF, byage=TRUE))
# temp <- as.vector(aperm(temp1, c(2,1)))
# FM_Pret[SAYR] <- temp
# # update retained catch
# CB_Pret[SAYR] <- FM_Pret[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
#
# # M_age_area <- array(M_ageArray[,,y], dim=c(nsim, maxage, nareas))
#
#
#
#
#
#
# # Fs <- suppressWarnings(-log(1 - apply(CB_P[, , y, ], 1, sum)/apply(VBiomass_P[, , y, ]*exp(-(0.5*M_age_area)), 1, sum))) # Pope's approx
# # Fs[!is.finite(Fs)] <- 2 # NaN for very high Fs
#
# Effort <- Fs/(FinF * qs*qvar[,y]* (1 + qinc/100)^y) * apply(fracE2, 1, sum)
#
# # Make sure Effort doesn't exceed regulated effort
# if (length(MPRecs$Effort) >0 | all(LastEi != 1)) { # an effort regulation also exists
# aboveE <- which(Effort > Ei)
# if (length(aboveE)>0) {
# Effort[aboveE] <- Ei[aboveE] * FinF[aboveE] * apply(fracE2, 1, sum)[aboveE]
# SAYR <- as.matrix(expand.grid(aboveE, 1:maxage, y, 1:nareas))
# SAYRt <- as.matrix(expand.grid(aboveE, 1:maxage, y + nyears, 1:nareas)) # Trajectory year
# SYt <- SAYRt[, c(1, 3)]
# SAYt <- SAYRt[, 1:3]
# SR <- SAYR[, c(1, 4)]
# S1 <- SAYR[, 1]
# SY1 <- SAYR[, c(1, 3)]
# FM_P[SAYR] <- (FinF[S1] * Ei[S1] * V_P[SAYt] * t(Si)[SR] * fishdist[SR] * qvar[SY1] *
# (qs[S1]*(1 + qinc[S1]/100)^y))/Asize[SR]
#
# # retained fishing mortality with input control recommendation
# FM_Pret[SAYR] <- (FinF[S1] * Ei[S1] * retA_P[SAYt] * t(Si)[SR] * fishdist[SR] *
# qvar[SY1] * qs[S1]*(1 + qinc[S1]/100)^y)/Asize[SR]
#
# Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
#
# CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
# CB_Pret[SAYR] <- FM_Pret[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
# }
#
# }
#
# }
#
# # Returns
# out <- list()
# out$TACrec <- TACused
# out$V_P <- V_P
# out$SLarray_P <- SLarray_P
# out$retA_P <- retA_P
# out$retL_P <- retL_P
# out$Fdisc_P <- Fdisc_P
# out$VBiomass_ <- VBiomass_P
# out$Z_P <- Z_P
# out$FM_P <- FM_P
# out$FM_Pret <- FM_Pret
# out$CB_P <- CB_P
# out$CB_Pret <- CB_Pret
# out$Si <- Si
# out$Ai <- Ai
# out$Ei <- Ei
# out$Effort <- Effort
# out
# }
#
# getMSY <- function(x, MatAge, LenAge, WtAge, MatureAge, VAge, maxage, R0, SRrel, hs) {
#
# opt <- optimize(MSYCalcs, log(c(0.001, 10)), MatAge=MatAge[x,], LenAge=LenAge[x,],
# WtAge=WtAge[x,], MatureAge=MatureAge[x,],
# VAge=VAge[x,], maxage=maxage, R0=R0[x], SRrel=SRrel[x], hs=hs[x], opt=1)
#
#
# runMod <- MSYCalcs(logapicF=opt$minimum, MatAge=MatAge[x,], LenAge=LenAge[x,],
# WtAge=WtAge[x,], MatureAge=MatureAge[x,],
# VAge=VAge[x,], maxage=maxage, R0=R0[x], SRrel=SRrel[x], hs=hs[x], opt=2)
#
# runMod
# }
#
# MSYCalcs <- function(logapicF, M_at_Age, WtAge, MatureAge, VAge, maxage, R0, SRrel, hs, opt=1) {
# # Box 3.1 Walters & Martell 2004
# U <- exp(logU)
# lx <- l0 <- rep(1, maxage)
# for (a in 2:maxage) {
# l0[a] <- l0[a-1] * exp(-M_at_Age[a-1])
# lx[a] <- lx[a-1] * exp(-M_at_Age[a-1]) * (1-U*VAge[a-1])
# }
# Egg0 <- sum(l0 * WtAge * MatureAge) # unfished egg production (assuming fecundity proportional to weight)
# EggF <- sum(lx * WtAge * MatureAge) # fished egg production (assuming fecundity proportional to weight)
#
# vB0 <- sum(l0 * WtAge * VAge)
# vBF <- sum(lx * WtAge * VAge)
#
# SB0 <- sum(l0 * WtAge * MatureAge) # same as eggs atm
# SBF <- sum(lx * WtAge * MatureAge)
#
# B0 <- sum(l0 * WtAge)
# BF <- sum(lx * WtAge)
#
# hs[hs>0.999] <- 0.999
# recK <- (4*hs)/(1-hs) # Goodyear compensation ratio
# reca <- recK/Egg0
# if (SRrel ==1) {
# recb <- (reca * Egg0 - 1)/(R0*Egg0) # BH SRR
# RelRec <- (reca * EggF-1)/(recb*EggF)
# }
# if (SRrel ==2) {
# bR <- (log(5*hs)/(0.8*SB0))
# aR <- exp(bR*SB0)/(SB0/R0)
# RelRec <- (log(aR*EggF/R0))/(bR*EggF/R0)
# }
#
# RelRec[RelRec<0] <- 0
#
# Fa <- apicF*VAge
# Za <- Fa + M_at_Age
# relyield <- Fa/Za * lx * (1-exp(-Za)) * WtAge
# YPR <- sum(relyield)
# Yield <- YPR * RelRec
#
# if (opt == 1) return(-Yield)
# if (opt == 2) {
# out <- c(Yield=Yield,
# F= CalculateF(relyield * RelRec, M_at_Age, VAge, lx * WtAge * RelRec),
# SB = SBF * RelRec,
# SB_SB0 = (SBF * RelRec)/(SB0 * R0),
# B_B0 = (BF * RelRec)/(B0 * R0),
# B = BF * RelRec,
# VB = vBF * RelRec,
# VB_VB0 = (vBF * RelRec)/(vB0 * R0),
# RelRec=RelRec,
# SB0 = SB0 * R0,
# B0=B0 * R0,
# apicF=apicF)
#
# return(out)
# }
# }
calcF <- function(x, TACusedE, V_P, Biomass_P, fishdist, Asize, maxage, nareas,
                  M_ageArray, nyears, y) {
  # Solve the Baranov catch equation for the apical fishing mortality that
  # removes catch TACusedE[x] in projection year y, using Newton's method
  # (after Steve Martell's ADMB implementation:
  # http://api.admb-project.org/baranov_8cpp_source.html).
  #
  # x          - simulation index
  # TACusedE   - vector (nsim) of catch to take this year
  # V_P        - selectivity-at-age array, indexed [sim, age, year]
  # Biomass_P  - biomass array, indexed [sim, age, proj-year, area]
  # fishdist   - matrix [sim, area] of the spatial distribution of effort
  # Asize      - matrix [sim, area] of relative area sizes
  # M_ageArray - natural mortality-at-age array, indexed [sim, age, year]
  # nyears     - number of historical years; y - projection year index
  # Returns the scalar apical F for simulation x.
  catch_target <- TACusedE[x]
  bio <- Biomass_P[x, , y, ]                       # age x area biomass this year
  selage <- matrix(V_P[x, , y + nyears], nrow = maxage, ncol = nareas)
  # relative distribution of F over ages and areas (selectivity x effort / area)
  relF <- selage *
    matrix(fishdist[x, ], maxage, nareas, byrow = TRUE) /
    matrix(Asize[x, ], maxage, nareas, byrow = TRUE)
  natM <- matrix(M_ageArray[x, , y + nyears], nrow = maxage, ncol = nareas,
                 byrow = FALSE)
  apicF <- catch_target / sum(bio * selage)        # initial guess: C / vuln. biomass
  for (it in seq_len(50)) {
    Fmat <- apicF * relF
    Zmat <- Fmat + natM
    predc <- Fmat / Zmat * (1 - exp(-Zmat)) * bio  # Baranov predicted catch
    pct <- sum(predc)
    Omat <- (1 - exp(-Zmat)) * bio
    # gradient of predicted catch w.r.t. F (Martell's form)
    grad <- sum(Omat / Zmat - (Fmat * Omat) / Zmat^2 +
                  Fmat / Zmat * exp(-Zmat) * bio)
    apicF <- apicF - (pct - catch_target) / grad   # Newton step
    if (abs(pct - catch_target) < 1E-6) break
  }
  apicF
}
#' Internal wrapper function to calculate MSY reference points
#'
#' @param x Simulation number
#' @param M_ageArray Array of M-at-age
#' @param Wt_age Array of weight-at-age
#' @param Mat_age Array of maturity-at-age
#' @param V Array of selectivity-at-age
#' @param maxage Maximum age
#' @param R0 Vector of R0s
#' @param SRrel SRR type
#' @param hs Vector of steepness
#' @param yr.ind Year index used in calculations
#' @param plusgroup Integer. Default = 0 = no plus-group. Use 1 to include a plus-group
#' @return Results from `MSYCalcs`
#' @export
#'
#' @keywords internal
optMSY_eq <- function(x, M_ageArray, Wt_age, Mat_age, V, maxage, R0, SRrel, hs,
                      yr.ind=1, plusgroup=0) {
  # Wrapper that extracts the at-age schedules for simulation x and finds the
  # equilibrium MSY reference points by minimising -Yield (MSYCalcs, opt = 1)
  # over log(F), then re-evaluating MSYCalcs at the optimum (opt = 2).
  one_year <- length(yr.ind) == 1
  # Pull the schedule for a single year, or average across the supplied years
  sched <- function(arr) {
    if (one_year) arr[x, , yr.ind] else rowMeans(arr[x, , yr.ind])
  }
  M_at_Age   <- sched(M_ageArray)
  Wt_at_Age  <- sched(Wt_age)
  Mat_at_Age <- sched(Mat_age)
  V_at_Age   <- sched(V)

  # search for FMSY on the log scale within sensible bounds
  boundsF <- c(1E-8, 3)
  doopt <- optimise(MSYCalcs, log(boundsF), M_at_Age, Wt_at_Age, Mat_at_Age,
                    V_at_Age, maxage, R0x=R0[x], SRrelx=SRrel[x], hx=hs[x], opt=1,
                    plusgroup=plusgroup)
  # full set of reference points at the optimised F
  MSYCalcs(doopt$minimum, M_at_Age, Wt_at_Age, Mat_at_Age,
           V_at_Age, maxage, R0x=R0[x], SRrelx=SRrel[x], hx=hs[x], opt=2,
           plusgroup=plusgroup)
}
#' Internal function to calculate MSY Reference Points
#'
#' Per-recruit equilibrium calculations (Box 3.1 of Walters & Martell 2004)
#' combined with the stock-recruit relationship to return equilibrium yield
#' and related reference points at a given fishing mortality rate.
#'
#' @param logF log fishing mortality
#' @param M_at_Age Vector of M-at-age
#' @param Wt_at_Age Vector of weight-at-age
#' @param Mat_at_Age Vector of maturity-at-age
#' @param V_at_Age Vector of selectivity-at-age
#' @param maxage Maximum age
#' @param R0x R0 for this simulation
#' @param SRrelx SRR type for this simulation (1 = Beverton-Holt, 2 = Ricker)
#' @param hx numeric. Steepness value for this simulation
#' @param opt Option. 1 = return -Yield, 2 = return all MSY calcs
#' @param plusgroup Integer. Default = 0 = no plus-group. Use 1 to include a plus-group
#' @return If `opt = 1`, the negative equilibrium yield (for minimisation);
#'   if `opt = 2`, a named vector of equilibrium quantities
#' @export
#'
#' @keywords internal
MSYCalcs <- function(logF, M_at_Age, Wt_at_Age, Mat_at_Age, V_at_Age,
                     maxage, R0x, SRrelx, hx, opt=1, plusgroup=0) {
  # Box 3.1 Walters & Martell 2004
  FF <- exp(logF)

  # Survivorship-at-age: unfished (l0) and fished (lx).
  # seq_len() guards against maxage == 1, where 1:(maxage-1) would be c(1, 0)
  l0 <- c(1, exp(cumsum(-M_at_Age[seq_len(maxage - 1)])))
  surv <- exp(-M_at_Age - FF * V_at_Age)
  lx <- rep(1, maxage)
  if (maxage > 1) {
    for (a in 2:maxage) lx[a] <- lx[a-1] * surv[a-1]
  }
  if (plusgroup == 1) {
    # Accumulate the plus-group: geometric series of survival in the last age class
    l0[length(l0)] <- l0[length(l0)]/(1 - exp(-M_at_Age[length(l0)]))
    lx[length(lx)] <- lx[length(lx)]/(1 - surv[length(lx)])
  }

  # Per-recruit quantities (fecundity assumed proportional to weight)
  Egg0 <- sum(l0 * Wt_at_Age * Mat_at_Age) # unfished egg-per-recruit
  EggF <- sum(lx * Wt_at_Age * Mat_at_Age) # fished egg-per-recruit
  vB0 <- sum(l0 * Wt_at_Age * V_at_Age)    # unfished vulnerable biomass-per-recruit
  vBF <- sum(lx * Wt_at_Age * V_at_Age)    # fished vulnerable biomass-per-recruit
  SB0 <- sum(l0 * Wt_at_Age * Mat_at_Age)  # spawning biomass-per-recruit (same as eggs atm)
  SBF <- sum(lx * Wt_at_Age * Mat_at_Age)
  B0 <- sum(l0 * Wt_at_Age)                # total biomass-per-recruit
  BF <- sum(lx * Wt_at_Age)

  hx[hx > 0.999] <- 0.999 # cap steepness; h = 1 would divide by zero below
  recK <- (4*hx)/(1-hx)   # Goodyear compensation ratio
  reca <- recK/Egg0

  # Equilibrium recruitment at this level of fishing
  if (SRrelx == 1) { # Beverton-Holt SRR
    recb <- (reca * Egg0 - 1)/(R0x*Egg0)
    RelRec <- (reca * EggF - 1)/(recb*EggF)
  } else if (SRrelx == 2) { # Ricker SRR
    bR <- (log(5*hx)/(0.8*SB0))
    aR <- exp(bR*SB0)/(SB0/R0x)
    RelRec <- (log(aR*EggF/R0x))/(bR*EggF/R0x)
  } else {
    # Previously an unknown SRR type failed later with "object 'RelRec' not
    # found"; fail fast with a clear message instead
    stop("Unknown SRrelx value: ", SRrelx,
         ". Must be 1 (Beverton-Holt) or 2 (Ricker)", call. = FALSE)
  }
  RelRec[RelRec < 0] <- 0 # no negative equilibrium recruitment (stock crashed)

  Z_at_Age <- FF * V_at_Age + M_at_Age
  # Baranov yield-per-recruit
  YPR <- sum(lx * Wt_at_Age * FF * V_at_Age * (1 - exp(-Z_at_Age))/Z_at_Age)
  Yield <- YPR * RelRec

  if (opt == 1) return(-Yield)
  if (opt == 2) {
    out <- c(Yield = Yield,
             F = FF,
             SB = SBF * RelRec,
             SB_SB0 = (SBF * RelRec)/(SB0 * R0x),
             B_B0 = (BF * RelRec)/(B0 * R0x),
             B = BF * RelRec,
             VB = vBF * RelRec,
             VB_VB0 = (vBF * RelRec)/(vB0 * R0x),
             RelRec = RelRec,
             SB0 = SB0 * R0x,
             B0 = B0 * R0x)
    return(out)
  }
}
# optMSY_eq <- function(x, M_ageArray, Wt_age, Mat_age, V, maxage, R0, SRrel, hs, yr=1) {
# boundsU <- c(0.0000001, 1)
#
# doopt <- optimise(MSYCalcs, log(boundsU), M_at_Age=M_ageArray[x,,yr], WtAge=Wt_age[x,,yr],
# MatureAge=Mat_age[x,,yr], VAge=V[x,,yr], maxage, R0=R0[x], SRrel=SRrel[x], hs=hs[x], opt=1)
#
# apicFMSY <- exp(doopt$minimum)
# apicFMSY2 <- apicFMSY
#
# MSYs <- MSYCalcs(log(apicFMSY), M_at_Age=M_ageArray[x,,yr], WtAge=Wt_age[x,,yr],
# MatureAge=Mat_age[x,,yr], VAge=V[x,,yr], maxage, R0=R0[x], SRrel=SRrel[x], hs=hs[x], opt=2)
#
# if (MSYs[1] < 1) {
# count <- 0; stop <- FALSE
# while (apicFMSY > 0.95 * max(bounds) & count < 50 & !stop) {
# count <- count + 1
# bounds <- c(0.0000001, max(bounds)-0.1)
# if (bounds[1] < bounds[2]) {
# doopt <- optimise(MSYCalcs, log(bounds), M_at_Age=M_ageArray[x,,yr], WtAge=Wt_age[x,,yr],
# MatureAge=Mat_age[x,,yr], VAge=V[x,,yr], maxage, R0=R0[x], SRrel=SRrel[x], hs=hs[x], opt=1)
# apicFMSY <- exp(doopt$minimum)
# } else {
# stop <- TRUE
# }
# }
# if (count >=50 | stop) apicFMSY <- apicFMSY2
# MSYs <- MSYCalcs(log(apicFMSY), M_at_Age=M_ageArray[x,,yr], WtAge=Wt_age[x,,yr],
# MatureAge=Mat_age[x,,yr], VAge=V[x,,yr], maxage, R0=R0[x], SRrel=SRrel[x], hs=hs[x], opt=2)
# }
# return(MSYs)
#
# }
# Split an array into a list of sub-arrays along dimension n.
#
# Element i of the result is the slice of `a` at index i along dimension n,
# with that dimension dropped (dim and dimnames of the other dimensions are
# preserved). The list is named by dimnames(a)[[n]] when present.
split.along.dim <- function(a, n) {
  dims <- dim(a)
  # Index along dimension n that each element of `a` belongs to
  grp <- arrayInd(seq_along(a), dims)[, n]
  slices <- lapply(split(a, grp), array, dim = dims[-n], dimnames(a)[-n])
  stats::setNames(slices, dimnames(a)[[n]])
}
#' optimize for catchability (q)
#'
#' Function optimizes catchability (q, where F=qE) required to get to user-specified stock
#' depletion
#'
#' @param x Integer, the simulation number
#' @param D A numeric vector nsim long of sampled depletion
#' @param SSB0 A numeric vector nsim long of total unfished spawning biomass
#' @param nareas The number of spatial areas
#' @param maxage The maximum age
#' @param N Array of the numbers-at-age in population. Dimensions are nsim, maxage, nyears, nareas.
#' Only values from the first year (i.e `N[,,1,]`) are used, which is the current N-at-age.
#' @param pyears The number of years to project forward. Equal to 'nyears' for optimizing for q.
#' @param M_ageArray An array (dimensions nsim, maxage, nyears+proyears) with the natural mortality-at-age and year
#' @param Mat_age An array (dimensions nsim, maxage, proyears+nyears) with the proportion mature for each age-class
#' @param Asize A matrix (dimensions nsim, nareas) with size of each area
#' @param Wt_age An array (dimensions nsim, maxage, nyears+proyears) with the weight-at-age and year
#' @param V An array (dimensions nsim, maxage, nyears+proyears) with the vulnerability-at-age and year
#' @param retA An array (dimensions nsim, maxage, nyears+proyears) with the probability retained-at-age and year
#' @param Perr A matrix (dimensions nsim, nyears+proyears) with the recruitment deviations
#' @param mov An array (dimensions nsim, nareas, nareas, nyears+proyears) with the movement matrix
#' @param SRrel A numeric vector nsim long specifying the recruitment curve to use
#' @param Find A matrix (dimensions nsim, nyears) with the historical fishing effort
#' @param Spat_targ A numeric vector nsim long with the spatial targeting
#' @param hs A numeric vector nsim long with the steepness values for each simulation
#' @param R0a A matrix (dimensions nsim, nareas) with the unfished recruitment by area
#' @param SSBpR A matrix (dimensions nsim, nareas) with the unfished spawning-per-recruit by area
#' @param aR A numeric vector nareas long with the Ricker SRR a values
#' @param bR A numeric vector nareas long with the Ricker SRR b values
#' @param bounds A numeric vector of length 2 with bounds for the optimizer
#' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
#' @param MPA A matrix of spatial closures by year
#' @param plusgroup Integer. Default = 0 = no plus-group. Use 1 to include a plus-group
#' @param VB0 numeric vector nsim long of total unfished vulnerable biomass
#' @param optVB Logical. Optimize for vulnerable biomass?
#' @return The catchability coefficient q (back-transformed from log space)
#' @author A. Hordyk
#' @keywords internal
getq3 <- function(x, D, SSB0, nareas, maxage, N, pyears, M_ageArray, Mat_age, Asize, Wt_age,
                  V, retA, Perr, mov, SRrel, Find, Spat_targ, hs, R0a, SSBpR, aR, bR,
                  bounds = c(1e-05, 15), maxF, MPA, plusgroup, VB0, optVB) {
  # Minimise the squared log-difference between target and simulated depletion
  # over log(q). Every argument forwarded through optimize()'s `...` is named
  # explicitly: the previous version passed VB0[x] and optVB positionally,
  # which silently breaks if optQ's argument order ever changes.
  opt <- optimize(optQ, log(bounds), depc=D[x], SSB0c=SSB0[x], nareas=nareas,
                  maxage=maxage, Ncurr=N[x,,1,],
                  pyears=pyears, M_age=M_ageArray[x,,], MatAge=Mat_age[x,,],
                  Asize_c=Asize[x,], WtAge=Wt_age[x,,],
                  Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
                  movc=split.along.dim(mov[x,,,,],4),
                  SRrelc=SRrel[x],
                  Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
                  SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], maxF=maxF, MPA=MPA,
                  plusgroup=plusgroup, VB0c=VB0[x], optVB=optVB)
  return(exp(opt$minimum))
}
#' Optimize q for a single simulation
#'
#' Objective function for `getq3`: projects the population forward with
#' F = qE and returns the squared log-difference between the target and the
#' simulated terminal-year depletion.
#'
#' @param logQ log q
#' @param depc Target depletion value
#' @param SSB0c Unfished spawning biomass
#' @param nareas Number of areas
#' @param maxage Maximum age
#' @param Ncurr Current N-at-age
#' @param pyears Number of years to project population dynamics
#' @param M_age M-at-age
#' @param Asize_c Numeric vector (length nareas) with size of each area
#' @param MatAge Maturity-at-age
#' @param WtAge Weight-at-age
#' @param Vuln Vulnerability-at-age
#' @param Retc Retention-at-age
#' @param Prec Recruitment error by year
#' @param movc movement matrix
#' @param SRrelc SR parameter
#' @param Effind Historical fishing effort
#' @param Spat_targc Spatial targeting
#' @param hc Steepness
#' @param R0c Unfished recruitment by area
#' @param SSBpRc Unfished spawning biomass per recruit by area
#' @param aRc Ricker aR
#' @param bRc Ricker bR
#' @param maxF maximum F
#' @param MPA A matrix of spatial closures by year
#' @param plusgroup Integer. Default = 0 = no plus-group. Use 1 to include a plus-group
#' @param VB0c Unfished vulnerable biomass
#' @param optVB Logical. Optimize for vulnerable biomass?
#' @return Squared difference of log depletion (minimised by `getq3`)
#' @author A. Hordyk
#' @keywords internal
optQ <- function(logQ, depc, SSB0c, nareas, maxage, Ncurr, pyears, M_age, Asize_c,
                 MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind, Spat_targc, hc,
                 R0c, SSBpRc, aRc, bRc, maxF, MPA, plusgroup, VB0c, optVB) {
  # Simulate the historical dynamics with catchability q = exp(logQ)
  # (control = 1: F computed from q and effort)
  simpop <- popdynCPP(nareas, maxage, Ncurr, pyears, M_age, Asize_c,
                      MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind,
                      Spat_targc, hc,
                      R0c = R0c, SSBpRc = SSBpRc, aRc = aRc, bRc = bRc,
                      Qc = exp(logQ), Fapic = 0,
                      maxF = maxF, MPA = MPA, control = 1, SSB0c = SSB0c,
                      plusgroup = plusgroup)
  # Final-year totals: spawning biomass (element 4) and vulnerable biomass
  # (element 5), summed over ages and areas
  ssbT <- sum(simpop[[4]][, pyears, ])
  vbT <- sum(simpop[[5]][, pyears, ])
  simD <- if (optVB) vbT / VB0c else ssbT / SSB0c
  (log(depc) - log(simD))^2
}
# #' Population dynamics model
# #'
# #' @param nareas Integer. The number of spatial areas
# #' @param maxage Integer. The maximum age
# #' @param Ncurr Numeric matrix (dimensions maxage, nareas) with the current N-at-age
# #' @param pyears Integer. Number of years to project the model forward
# #' @param M_age Numeric matrix (dimensions maxage, pyears) with natural mortality at age
# #' @param Asize_c Numeric vector (length nareas) with size of each area
# #' @param MatAge Numeric matrix (dimensions maxage, nyears+proyears) with proportion mature for each age-class
# #' @param WtAge Numeric matrix (dimensions maxage, pyears) with weight-at-age
# #' @param Vuln Numeric matrix (dimensions maxage, pyears) with proportion vulnerable-at-age
# #' @param Retc Numeric matrix (dimensions maxage, pyears) with proportion retained-at-age
# #' @param Prec Numeric vector (length pyears) with recruitment error
# #' @param movc Numeric matrix (dimensions nareas, nareas) with movement matrix
# #' @param SRrelc Integer. Stock-recruitment curve
# #' @param Effind Numeric vector (length pyears) with the fishing effort by year
# #' @param Spat_targc Integer. Value of spatial targetting
# #' @param hc Numeric. Steepness of stock-recruit relationship
# #' @param R0c Numeric vector of length nareas with unfished recruitment by area
# #' @param SSBpRc Numeric vector of length nareas with unfished spawning per recruit by area
# #' @param aRc Numeric. Ricker SRR a value
# #' @param bRc Numeric. Ricker SRR b value
# #' @param Qc Numeric. Catchability coefficient
# #' @param Fapic Numeric. Apical F value
# #' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
# #' @param MPA A matrix of spatial closures by year
# #' @param control Integer. 1 to use q and effort to calculate F, 2 to use Fapic (apical F) and
# #' vulnerablity to calculate F.
# #'
# #' @author A. Hordyk
# #'
# #' @return A named list of length 8 containing with arrays (dimensions: maxage, pyears, nareas)
# #' containing numbers-at-age, biomass-at-age, spawning stock numbers, spawning biomass,
# #' vulnerable biomass, fishing mortality, retained fishing mortality, and total mortality
# # #' @export
# #'
# popdyn <- function(nareas, maxage, Ncurr, pyears, M_age, Asize_c,
# MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind, Spat_targc, hc,
# R0c, SSBpRc, aRc, bRc, Qc, Fapic=NULL, maxF, MPA, control=1) {
# Narray <- array(NA, dim=c(maxage, pyears, nareas))
# Barray <- array(NA, dim=c(maxage, pyears, nareas))
# SSNarray <- array(NA, dim=c(maxage, pyears, nareas))
# SBarray <- array(NA, dim=c(maxage, pyears, nareas))
# VBarray <- array(NA, dim=c(maxage, pyears, nareas))
# Marray <- array(NA, dim=c(maxage, pyears, nareas))
# FMarray <- array(NA, dim=c(maxage, pyears, nareas))
# FMretarray <- array(NA, dim=c(maxage, pyears, nareas))
# Zarray <- array(NA, dim=c(maxage, pyears, nareas))
#
# Narray[,1,] <- Ncurr
# Barray[,1,] <- Narray[,1,] * WtAge[,1]
# SSNarray[,1,] <- Ncurr * MatAge[,1] # spawning stock numbers
# SBarray[,1,] <- Narray[,1,] * WtAge[,1] * MatAge[,1] # spawning biomass
# VBarray[,1,] <- Narray[,1,] * WtAge[,1] * Vuln[,1] # vulnerable biomass
# Marray[,1,] <- M_age[,1] # M-at-age
#
# SAYR <- as.matrix(expand.grid(1:maxage, 1, 1:nareas)) # Set up some array indexes age (A) year (Y) region/area (R)
#
# # Distribution of fishing effort
# VBa <- colSums(VBarray[,1,]) # total vuln biomass in each area
#
# # fishdist <- VBa^Spat_targc/mean(VBa^Spat_targc)
# fishdist <- VBa^Spat_targc/sum(VBa^Spat_targc)
#
# Asize_mat <- matrix(Asize_c, nrow=maxage, ncol=nareas, byrow=TRUE)
#
# if (control == 1) {
# FMarray[SAYR] <- (Effind[SAYR[,2]] * Qc * Vuln[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# FMretarray[SAYR] <- (Effind[SAYR[,2]] * Qc * Retc[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# }
# if (control == 2) {
# FMarray[SAYR] <- (Fapic * Vuln[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# FMretarray[SAYR] <- (Fapic * Retc[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# }
#
# FMarray[,1,][FMarray[,1,] > (1 - exp(-maxF))] <- 1 - exp(-maxF)
# FMretarray[,1,][FMretarray[,1,] > (1 - exp(-maxF))] <- 1 - exp(-maxF)
#
# Zarray[,1,] <- Marray[,1,] + FMarray[,1,]
#
# for (y in 1:(pyears-1)) {
#
# NextYrN <- popdynOneTS(nareas, maxage, SSBcurr=colSums(SBarray[,y,]), Ncurr=Narray[,y,],
# Zcurr=Zarray[,y,], PerrYr=Prec[y+maxage+1], hc, R0c, SSBpRc, aRc, bRc,
# movc, SRrelc)
#
# Narray[,y+1,] <- NextYrN
# Barray[,y+1,] <- Narray[,y+1,] * WtAge[,y+1]
# SSNarray[,y+1,] <- Narray[,y+1,] * MatAge[,y+1] # spawning stock numbers
# SBarray[,y+1,] <- Narray[,y+1,] * WtAge[,y+1] * MatAge[,y+1] # spawning biomass
# VBarray[,y+1,] <- Narray[,y+1,] * WtAge[,y+1] * Vuln[,y+1] # vulnerable biomass
# Marray[, y+1, ] <- M_age[,y+1]
#
# # Distribution of fishing effort
# VBa <- colSums(VBarray[,y+1,]) # total vuln biomass in each area
# # fishdist <- VBa^Spat_targc/mean(VBa^Spat_targc)
# fishdist <- VBa^Spat_targc/sum(VBa^Spat_targc)
#
# d1 <- t(matrix(MPA[y,])) * fishdist # distribution of fishing effort
# fracE <- apply(d1, 1, sum) # fraction of current effort in open areas
# fracE2 <- d1 * (fracE + (1-fracE))/fracE # re-distribution of fishing effort
# fishdist <- fracE2 # fishing effort by area
#
#
# SAYR <- as.matrix(expand.grid(1:maxage, y+1, 1:nareas)) # Set up some array indexes age (A) year (Y) region/area (R)
# if (control ==1) {
# FMarray[SAYR] <- (Effind[SAYR[,2]] * Qc * Vuln[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# FMretarray[SAYR] <- (Effind[SAYR[,2]] * Qc * Retc[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# }
# if (control ==2) {
# FMarray[SAYR] <- (Fapic * Vuln[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# FMretarray[SAYR] <- (Fapic * Retc[SAYR[,1:2]] * fishdist[SAYR[,3]])/Asize_mat
# }
# FMarray[SAYR][FMarray[SAYR] > (1 - exp(-maxF))] <- 1 - exp(-maxF)
# FMretarray[SAYR][FMretarray[SAYR] > (1 - exp(-maxF))] <- 1 - exp(-maxF)
# Zarray[,y+1,] <- Marray[,y+1,] + FMarray[,y+1,]
#
# }
#
# out <- list()
# out$Narray <- Narray
# out$Barray <- Barray
# out$SSNarray <- SSNarray
# out$SBarray <- SBarray
# out$VBarray <- VBarray
# out$FMarray <- FMarray
# out$FMretarray <- FMretarray
# out$Zarray <- Zarray
#
# out
# }
#
# #' Population dynamics model for one annual time-step
# #'
# #' Project population forward one time-step given current numbers-at-age and total mortality
# #'
# #' @param nareas The number of spatial areas
# #' @param maxage The maximum age
# #' @param SSBcurr A numeric vector of length nareas with the current spawning biomass in each area
# #' @param Ncurr A numeric matrix (maxage, nareas) with current numbers-at-age in each area
# #' @param Zcurr A numeric matrix (maxage, nareas) with total mortality-at-age in each area
# #' @param PerrYr A numeric value with recruitment deviation for current year
# #' @param hs Steepness of SRR
# #' @param R0c Numeric vector with unfished recruitment by area
# #' @param SSBpRc Numeric vector with unfished spawning stock per recruit by area
# #' @param aRc Numeric vector with Ricker SRR a parameter by area
# #' @param bRc Numeric vector with Ricker SRR b parameter by area
# #' @param movc Numeric matrix (nareas by nareas) with the movement matrix
# #' @param SRrelc Integer indicating the stock-recruitment relationship to use (1 for Beverton-Holt, 2 for Ricker)
# #' @author A. Hordyk
# #'
# # #' @export
# #' @keywords internal
# popdynOneTS <- function(nareas, maxage, SSBcurr, Ncurr, Zcurr,
# PerrYr, hc, R0c, SSBpRc, aRc, bRc, movc, SRrelc) {
#
# # set up some indices for indexed calculation
#
# indMov <- as.matrix(expand.grid(1:maxage,1:nareas, 1:nareas)) # Movement master index
# indMov2 <- indMov[, c(1, 2)] # Movement from index
# indMov3 <- indMov[, c(2, 3)] # Movement to index
#
# Nnext <- array(NA, dim=c(maxage, nareas))
#
# # Recruitment assuming regional R0 and stock wide steepness
# if (SRrelc[1] == 1) {
# Nnext[1, ] <- PerrYr * (4 * R0c * hc * SSBcurr)/(SSBpRc * R0c * (1-hc) + (5*hc-1)*SSBcurr)
# } else {
# # most transparent form of the Ricker uses alpha and beta params
# Nnext[1, ] <- PerrYr * aRc * SSBcurr * exp(-bRc * SSBcurr)
# }
#
# # Mortality
# Nnext[2:maxage, ] <- Ncurr[1:(maxage - 1), ] * exp(-Zcurr[1:(maxage - 1), ]) # Total mortality
#
# # Movement of stock
# temp <- array(Nnext[indMov2] * movc[indMov3], dim = c(maxage,nareas, nareas)) # Move individuals
# Nnext <- apply(temp, c(1, 3), sum)
#
# # Numbers-at-age at beginning of next year
# return(Nnext)
#
# }
#
#
# #' Simulate population dynamics for historical years
# #'
# #' @param x Integer, the simulation number
# #' @param nareas The number of spatial areas
# #' @param maxage The maximum age
# #' @param N Array of the numbers-at-age in population. Dimensions are nsim, maxage, nyears, nareas.
# #' Only values from the first year (i.e `N[,,1,]`) are used, which is the current N-at-age.
# #' @param pyears The number of years to project forward. Equal to 'nyears' for optimizing for q.
# #' @param M_ageArray An array (dimensions nsim, maxage, nyears+proyears) with the natural mortality-at-age and year
# #' @param Asize A matrix (dimensions nsim, nareas) of size of areas
# #' @param Mat_age A matrix (dimensions nsim, maxage) with the proportion mature for each age-class
# #' @param Wt_age An array (dimensions nsim, maxage, nyears+proyears) with the weight-at-age and year
# #' @param V An array (dimensions nsim, maxage, nyears+proyears) with the vulnerability-at-age and year
# #' @param retA An array (dimensions nsim, maxage, nyears+proyears) with the probability retained-at-age and year
# #' @param Perr A matrix (dimensions nsim, nyears+proyears) with the recruitment deviations
# #' @param mov An array (dimensions nsim, nareas, nareas) with the movement matrix
# #' @param SRrel A numeric vector nsim long specifying the recruitment curve to use
# #' @param Find A matrix (dimensions nsim, nyears) with the historical fishing effort
# #' @param Spat_targ A numeric vector nsim long with the spatial targeting
# #' @param hs A numeric vector nsim long with the steepness values for each simulation
# #' @param R0a A matrix (dimensions nsim, nareas) with the unfished recruitment by area
# #' @param SSBpR A matrix (dimensions nsim, nareas) with the unfished spawning-per-recruit by area
# #' @param aR A numeric vector nsim long with the Ricker SRR a values
# #' @param bR A numeric vector nsim long with the Ricker SRR b values
# #' @param qs A numeric vector nsim long with catchability coefficients
# #' @param MPA A matrix of spatial closures by year
# #' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
# #' @param useCPP logical - use the CPP code? For testing purposes only
# #' @param SSB0 SSB0
# #' @author A. Hordyk
# #' @keywords internal
# #' @export
# simYears <- function(x, nareas, maxage, N, pyears, M_ageArray, Asize, Mat_age, Wt_age,
# V, retA, Perr, mov, SRrel, Find, Spat_targ, hs, R0a, SSBpR, aR, bR, qs,
# MPA, maxF, useCPP=TRUE, SSB0) {
# if(!useCPP) {
# # popdyn(nareas, maxage, Ncurr=N[x,,1,], pyears,
# # M_age=M_ageArray[x,,], Asize_c=Asize[x,], MatAge=Mat_age[x,,], WtAge=Wt_age[x,,],
# # Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,], movc=mov[x,,,], SRrelc=SRrel[x],
# # Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# # SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Qc=qs[x], MPA=MPA, maxF=maxF, control=1)
# # doesn't currently work with age-based movement
# } else {
# popdynCPP(nareas, maxage, Ncurr=N[x,,1,], pyears,
# M_age=M_ageArray[x,,], Asize_c=Asize[x,], MatAge=Mat_age[x,,], WtAge=Wt_age[x,,],
# Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,], movc=mov[x,,,], SRrelc=SRrel[x],
# Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Qc=qs[x], Fapic=0, MPA=MPA, maxF=maxF,
# control=1, SSB0c=SSB0[x])
# }
#
# }
# #' Calculate FMSY and related metrics using Rcpp code
# #'
# #' @param x Integer, the simulation number
# #' @param Asize A matrix (nsim by nareas) with size of areas
# #' @param nareas The number of spatial areas
# #' @param maxage The maximum age
# #' @param N Array of the numbers-at-age in population. Dimensions are nsim, maxage, nyears, nareas.
# #' Only values from the first year (i.e `N[,,1,]`) are used, which is the current N-at-age.
# #' @param pyears The number of years to project forward. Equal to 'nyears' for optimizing for q.
# #' @param M_ageArray An array (dimensions nsim, maxage, nyears+proyears) with the natural mortality-at-age and year
# #' @param Mat_age A matrix (dimensions nsim, maxage) with the proportion mature for each age-class
# #' @param Wt_age An array (dimensions nsim, maxage, nyears+proyears) with the weight-at-age and year
# #' @param V An array (dimensions nsim, maxage, nyears+proyears) with the vulnerability-at-age and year
# #' @param retA An array (dimensions nsim, maxage, nyears+proyears) with the probability retained-at-age and year
# #' @param Perr A matrix (dimensions nsim, nyears+proyears) with the recruitment deviations
# #' @param mov An array (dimensions nsim, nareas, nareas) with the movement matrix
# #' @param SRrel A numeric vector nsim long specifying the recruitment curve to use
# #' @param Find A matrix (dimensions nsim, nyears) with the historical fishing effort
# #' @param Spat_targ A numeric vector nsim long with the spatial targeting
# #' @param hs A numeric vector nsim long with the steepness values for each simulation
# #' @param R0a A matrix (dimensions nsim, nareas) with the unfished recruitment by area
# #' @param SSBpR A matrix (dimensions nsim, nareas) with the unfished spawning-per-recruit by area
# #' @param aR A numeric vector nsim long with the Ricker SRR a values
# #' @param bR A numeric vector nsim long with the Ricker SRR b values
# #' @param SSB0 Unfished spawning biomass
# #' @param B0 Unfished total biomass
# #' @param MPA A matrix of spatial closures by year
# #' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
# #' @param useCPP logical - use the CPP code? For testing purposes only
# #'
# #' @author A. Hordyk
# #'
# getFMSY3 <- function(x, Asize, nareas, maxage, N, pyears, M_ageArray, Mat_age, Wt_age,
# V, retA, Perr, mov, SRrel, Find, Spat_targ, hs, R0a, SSBpR, aR, bR,
# SSB0, B0, MPA, maxF, useCPP=TRUE) {
#
# opt <- optimize(optMSY, log(c(0.001, 10)), Asize_c=Asize[x,], nareas, maxage, Ncurr=N[x,,1,],
# pyears, M_age=M_ageArray[x,,], MatAge=Mat_age[x,,],
# WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
# movc=mov[x,,,], SRrelc=SRrel[x],
# Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], MPA=MPA, maxF=maxF, useCPP=useCPP,
# SSB0c=SSB0[x])
#
# MSY <- -opt$objective
#
# if (!useCPP) {
# # simpop <- popdyn(nareas, maxage, Ncurr=N[x,,1,],
# # pyears, M_age=M_ageArray[x,,], Asize_c=Asize[x,],
# # MatAge=Mat_age[x,,],
# # WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
# # movc=mov[x,,,], SRrelc=SRrel[x],
# # Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# # SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Fapic=exp(opt$minimum), MPA=MPA, maxF=maxF, control=2)
# #
# # # calculate B0 and SSB0 with current conditions
# # simpopF0 <- popdyn(nareas, maxage, Ncurr=N[x,,1,],
# # pyears, M_age=M_ageArray[x,,], Asize_c=Asize[x,],
# # MatAge=Mat_age[x,,],
# # WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
# # movc=mov[x,,,], SRrelc=SRrel[x],
# # Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# # SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Fapic=0, MPA=MPA, maxF=maxF, control=2)
#
# } else {
# simpop <- popdynCPP(nareas, maxage, Ncurr=N[x,,1,],
# pyears, M_age=M_ageArray[x,,], Asize_c=Asize[x,],
# MatAge=Mat_age[x,,],
# WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
# movc=mov[x,,,], SRrelc=SRrel[x],
# Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Qc=0, Fapic=exp(opt$minimum),
# MPA=MPA, maxF=maxF, control=2, SSB0c = SSB0[x])
# # calculate B0 and SSB0 with current conditions
# simpopF0 <- popdynCPP(nareas, maxage, Ncurr=N[x,,1,],
# pyears, M_age=M_ageArray[x,,], Asize_c=Asize[x,],
# MatAge=Mat_age[x,,],
# WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
# movc=mov[x,,,], SRrelc=SRrel[x],
# Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
# SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Qc=0, Fapic=0, MPA=MPA, maxF=maxF,
# control=2, SSB0c = SSB0[x])
# }
#
#
# ## Cn <- simpop[[7]]/simpop[[8]] * simpop[[1]] * (1-exp(-simpop[[8]])) # retained catch
# Cn <- simpop[[6]]/simpop[[8]] * simpop[[1]] * (1-exp(-simpop[[8]])) # removals
# Cb <- Cn[,pyears,] * Wt_age[x,,pyears]
#
# B <- sum(simpop[[2]][,pyears,] + Cb)
#
# SSB_MSY <- sum(simpop[[4]][,pyears,])
#
# V_BMSY <- sum(simpop[[5]][,pyears,])
# F_MSYv <- -log(1 - (MSY/(V_BMSY+MSY)))
#
#
# SSB0_curr <- sum(simpopF0[[4]][,pyears,])
# B0_curr <- sum(simpopF0[[2]][,pyears,])
# SSBMSY_SSB0 <- sum(simpop[[4]][,pyears,])/SSB0_curr
# BMSY_B0 <- sum(simpop[[2]][,pyears,])/B0_curr
# # SSBMSY_SSB0 <- sum(simpop[[4]][,pyears,])/SSB0[x]
# # BMSY_B0 <- sum(simpop[[2]][,pyears,])/B0[x]
#
#
# return(c(MSY = MSY, FMSY = F_MSYv, SSB = SSB_MSY, SSBMSY_SSB0=SSBMSY_SSB0,
# BMSY_B0=BMSY_B0, B = B, VB=V_BMSY+MSY))
#
# }
#
#
#
#
#' Optimize yield for a single simulation
#'
#' Objective function for the MSY search: projects the population with a
#' fixed apical F and returns the negative of the mean annual removal
#' biomass over the last five projection years (so `optimize` minimises).
#'
#' @param logFa log apical fishing mortality
#' @param Asize_c A vector of length areas with relative size of areas
#' @param nareas Number of area
#' @param maxage Maximum age
#' @param Ncurr Current N-at-age
#' @param pyears Number of projection years
#' @param M_age M-at-age
#' @param MatAge Maturity-at-age
#' @param WtAge Weight-at-age
#' @param Vuln Vulnerablity-at-age
#' @param Retc Retention-at-age
#' @param Prec Recruitment error
#' @param movc Movement matrix
#' @param SRrelc SR Relationship
#' @param Effind Historical effort
#' @param Spat_targc Spatial targeting
#' @param hc Steepness
#' @param R0c Unfished recruitment by area
#' @param SSBpRc Unfished spawning stock per recruit by area
#' @param aRc Ricker aR
#' @param bRc Ricker bR
#' @param Qc Catchability (not used here: F is set directly via `control = 2`)
#' @param MPA A matrix of spatial closures by year
#' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
#' @param SSB0c SSB0
#' @param plusgroup Integer. Default = 0 = no plus-group. Use 1 to include a plus-group
#' @return Negative mean yield over the final 5 projection years
#' @keywords internal
#'
#' @author A. Hordyk
#'
optMSY <- function(logFa, Asize_c, nareas, maxage, Ncurr, pyears, M_age,
                   MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind, Spat_targc, hc,
                   R0c, SSBpRc, aRc, bRc, Qc, MPA, maxF, SSB0c,
                   plusgroup=0) {
  apicalF <- exp(logFa)
  # Project with F specified directly from apical F and vulnerability
  # (control = 2); catchability is not used in this mode
  simpop <- popdynCPP(nareas, maxage, Ncurr, pyears, M_age, Asize_c,
                      MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind,
                      Spat_targc, hc,
                      R0c, SSBpRc, aRc, bRc, Qc = 0, Fapic = apicalF,
                      MPA = MPA, maxF = maxF, control = 2,
                      SSB0c = SSB0c, plusgroup = plusgroup)
  # Removals-at-age via the Baranov equation: F/Z * N * (1 - exp(-Z))
  # (list elements: 1 = N, 6 = F, 8 = Z)
  remN <- simpop[[6]] / simpop[[8]] * simpop[[1]] * (1 - exp(-simpop[[8]]))
  # Mean annual removal biomass over the final five years, negated
  yrs <- (pyears - 4):pyears
  remB <- remN[, yrs, ] * array(WtAge[, yrs], dim = dim(remN[, yrs, ]))
  -mean(apply(remB, 2, sum))
}
#' Calculate Reference Yield
#'
#' @param x Integer, the simulation number
#' @param Asize A matrix (dimensions nsim by nareas) with relative size of areas
#' @param nareas The number of spatial areas
#' @param maxage The maximum age
#' @param N Array of the numbers-at-age in population. Dimensions are nsim, maxage, nyears, nareas.
#' Only values from the first year are used, which is the current N-at-age.
#' @param pyears The number of years to project forward. Equal to 'nyears' for optimizing for q.
#' @param M_ageArray An array (dimensions nsim, maxage, nyears+proyears) with the natural mortality-at-age and year
#' @param Mat_age An array (dimensions nsim, maxage, nyears+proyears) with the proportion mature for each age-class
#' @param Wt_age An array (dimensions nsim, maxage, nyears+proyears) with the weight-at-age and year
#' @param V An array (dimensions nsim, maxage, nyears+proyears) with the vulnerability-at-age and year
#' @param retA An array (dimensions nsim, maxage, nyears+proyears) with the probability retained-at-age and year
#' @param Perr A matrix (dimensions nsim, nyears+proyears) with the recruitment deviations
#' @param mov An array (dimensions nsim, nareas, nareas) with the movement matrix
#' @param SRrel A numeric vector nsim long specifying the recruitment curve to use
#' @param Find A matrix (dimensions nsim, nyears) with the historical fishing effort
#' @param Spat_targ A numeric vector nsim long with the spatial targeting
#' @param hs A numeric vector nsim long with the steepness values for each simulation
#' @param R0a A matrix (dimensions nsim, nareas) with the unfished recruitment by area
#' @param SSBpR A matrix (dimensions nsim, nareas) with the unfished spawning-per-recruit by area
#' @param aR A numeric vector nareas long with the Ricker SRR a values
#' @param bR A numeric vector nareas long with the Ricker SRR b values
#' @param MPA A matrix of spatial closures by year
#' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
#' @param SSB0 SSB0
#' @param plusgroup Integer. Default = 0 = no plus-group. Use 1 to include a plus-group
#' @return The reference yield: maximum mean yield over the final projection years
#' @author A. Hordyk
#' @export
#' @keywords internal
getFref3 <- function(x, Asize, nareas, maxage, N, pyears, M_ageArray, Mat_age, Wt_age,
                     V, retA, Perr, mov, SRrel, Find, Spat_targ, hs, R0a, SSBpR, aR, bR,
                     MPA, maxF, SSB0, plusgroup=0) {
  # Search apical F over [0.001, 10] in log space for the value that
  # maximises mean yield; optMSY returns the NEGATIVE yield, so optimize()
  # minimises and we flip the sign of the objective at the end
  Frange <- log(c(0.001, 10))
  fit <- optimize(optMSY, Frange, Asize_c=Asize[x,], nareas=nareas, maxage=maxage,
                  Ncurr=N[x,,1,],
                  pyears=pyears, M_age=M_ageArray[x,,], MatAge=Mat_age[x,,],
                  WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
                  movc=split.along.dim(mov[x,,,,],4), SRrelc=SRrel[x],
                  Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
                  SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], MPA=MPA, maxF=maxF,
                  SSB0c=SSB0[x], plusgroup=plusgroup)
  -fit$objective
}
# Input Control Functions Wrapper function for input control methods
#' Runs input control MPs on a Data object.
#'
#' Function runs a MP (or MPs) of class 'Input' and returns a list: input
#' control recommendation(s) in element 1 and Data object in element 2.
#'
#' MPs run in parallel (snowfall) when a cluster is running and there are at
#' least 8 MPs or 8 simulations; otherwise serially.
#'
#' @usage runInMP(Data, MPs = NA, reps = 100)
#' @param Data A object of class Data
#' @param MPs A vector of MPs of class 'Input'
#' @param reps Number of stochastic repetitions - often not used in input
#' control MPs.
#' @author A. Hordyk
#' @export
runInMP <- function(Data, MPs = NA, reps = 100) {
  nsims <- length(Data@Mort)
  if (.hasSlot(Data, "nareas")) {
    nareas <- Data@nareas
  } else {
    nareas <- 2
  }
  nMPs <- length(MPs)

  # Collate the nsims recommendation objects returned by one MP into a named
  # list of slot values: 'Misc' stays a list (one element per simulation),
  # 'Spatial' becomes an nsims x nareas matrix, everything else is unlisted.
  collateRecs <- function(temp) {
    recList <- list()
    for (X in slotNames(temp[[1]])) { # sequence along recommendation slots
      if (X == "Misc") {
        rec <- lapply(temp, slot, name = X)
      } else {
        rec <- unlist(lapply(temp, slot, name = X))
      }
      if (X == "Spatial") rec <- matrix(rec, nsims, nareas, byrow = TRUE)
      recList[[X]] <- rec
    }
    recList
  }

  # Serial unless a snowfall cluster is running and the problem is large
  serial <- !sfIsRunning() || (nMPs < 8 && nsims < 8)
  if (!serial) sfExport(list = c("Data"))

  returnList <- list() # one element per MP with its recommendations
  for (ff in seq_len(nMPs)) {
    temp <- if (serial) {
      sapply(seq_len(nsims), MPs[ff], Data = Data, reps = reps)
    } else {
      sfSapply(seq_len(nsims), MPs[ff], Data = Data, reps = reps)
    }
    recList <- collateRecs(temp)
    # Copy the per-simulation Misc back into the Data object, then drop it
    # from the recommendation list. NOTE: previously these two steps ran
    # INSIDE the slot loop, so on iterations after "Misc" they re-assigned
    # from NULL and could delete the freshly-copied Data@Misc entries (it
    # only worked by accident when "Misc" was the last slot); they belong
    # here, after all slots are collated.
    for (x in seq_len(nsims)) Data@Misc[[x]] <- recList$Misc[[x]]
    recList$Misc <- NULL
    returnList[[ff]] <- recList
  }
  return(list(returnList, Data))
}
# Project the population for simulation 'x' under constant (equilibrium)
# conditions. Thin wrapper around the compiled population-dynamics kernel
# popdynCPP (called with control=3 and Qc=0, Fapic=0), returning the
# numbers-at-age by area in year 'Nyrs'.
# NOTE(review): arguments B0 and R0 are accepted but not used in this body --
# presumably kept for signature compatibility with sibling callers; confirm.
projectEq <- function(x, Asize, nareas, maxage, N, pyears, M_ageArray, Mat_age, Wt_age,
V, retA, Perr, mov, SRrel, Find, Spat_targ, hs, R0a, SSBpR, aR, bR,
SSB0, B0, MPA, maxF, Nyrs, R0) {
# all simulation-indexed arrays are subset to simulation 'x' before the call
simpop <- popdynCPP(nareas, maxage, Ncurr=N[x,,1,],
pyears, M_age=M_ageArray[x,,], Asize_c=Asize[x,],
MatAge=Mat_age[x,,],
WtAge=Wt_age[x,,], Vuln=V[x,,], Retc=retA[x,,], Prec=Perr[x,],
movc=split.along.dim(mov[x,,,,],4), SRrelc=SRrel[x],
Effind=Find[x,], Spat_targc=Spat_targ[x], hc=hs[x], R0c=R0a[x,],
SSBpRc=SSBpR[x,], aRc=aR[x,], bRc=bR[x,], Qc=0, Fapic=0, MPA=MPA,
maxF=maxF, control=3, SSB0c=SSB0[x])
# element 1 of the kernel output holds numbers-at-age; keep year 'Nyrs' only
simpop[[1]][,Nyrs,]
}
# Squared-error objective for tuning initial depletion: given a log-space
# recruitment multiplier 'Perrmulti', compute the initial spawning biomass
# implied by the first 'maxage' recruitment deviations of simulation 'x'
# and return the squared distance between the resulting depletion
# (SSB/SSB0) and the target initial depletion 'initD'.
# NOTE(review): 'surv' is accepted but not used in this body.
optDfun <- function(Perrmulti, x, initD, Nfrac, R0, Perr_y, surv,
                    Wt_age, SSB0, maxage) {
  mult <- exp(Perrmulti)                       # back-transform from log space
  rec.devs <- mult * rev(Perr_y[x, 1:maxage])  # deviations, oldest cohort first
  spawn.n <- Nfrac[x, ] * R0[x] * rec.devs     # initial spawning stock numbers
  spawn.b <- spawn.n * Wt_age[x, , 1]          # spawning stock biomass at age
  (sum(spawn.b) / SSB0[x] - initD[x])^2        # squared deviation from target
}
# Per-simulation wrapper: search for the recruitment-deviation multiplier
# (optimised in log space over the interval [0.01, 10]) that best matches
# the target initial depletion, returned back-transformed to linear space.
# NOTE(review): 'initdist' is accepted but not used in this body.
optDfunwrap <- function(x, initD, Nfrac, R0, initdist, Perr_y, surv,
                        Wt_age, SSB0, maxage) {
  fit <- optimise(optDfun, interval = log(c(0.01, 10)), x = x, initD = initD,
                  Nfrac = Nfrac, R0 = R0, Perr_y = Perr_y, surv = surv,
                  Wt_age = Wt_age, SSB0 = SSB0, maxage = maxage)
  exp(fit$minimum)  # back-transform the optimal multiplier
}
# calcMSYRicker <- function(MSYyr, M_ageArray, Wt_age, retA, V, Perr_y, maxage,
# nareas, Mat_age, nsim, Asize, N, Spat_targ, hs,
# SRrel, mov, Find, R0a, SSBpR, aR, bR, SSB0,
# B0, maxF=maxF, cur.yr) {
# # Note: MSY and refY are calculated from total removals not total catch (different when Fdisc>0 and there is discarding)
# # Make arrays for future conditions assuming current conditions
# M_ageArrayp <- array(M_ageArray[,,cur.yr], dim=c(dim(M_ageArray)[1:2], MSYyr))
# Wt_agep <- array(Wt_age[,,cur.yr], dim=c(dim(Wt_age)[1:2], MSYyr))
# retAp <- array(retA[,,cur.yr], dim=c(dim(retA)[1:2], MSYyr))
# Vp <- array(V[,,cur.yr], dim=c(dim(V)[1:2], MSYyr))
# Perrp <- array(1, dim=c(dim(Perr_y)[1], MSYyr+maxage))
# noMPA <- matrix(1, nrow=MSYyr, ncol=nareas)
# Mat_agep <-abind::abind(rep(list(Mat_age[,,cur.yr]), MSYyr), along=3)
# # optimize for MSY reference points
# if (snowfall::sfIsRunning()) {
# MSYrefs <- snowfall::sfSapply(1:nsim, getFMSY3, Asize, nareas=nareas,
# maxage=maxage, N=N, pyears=MSYyr,
# M_ageArray=M_ageArrayp, Mat_age=Mat_agep,
# Wt_age=Wt_agep, V=Vp, retA=retAp,
# Perr=Perrp, mov=mov, SRrel=SRrel,
# Find=Find, Spat_targ=Spat_targ, hs=hs,
# R0a=R0a, SSBpR=SSBpR, aR=aR, bR=bR, SSB0=SSB0,
# B0=B0, MPA=noMPA, maxF=maxF)
# } else {
# MSYrefs <- sapply(1:nsim, getFMSY3, Asize, nareas=nareas, maxage=maxage,
# N=N, pyears=MSYyr, M_ageArray=M_ageArrayp, Mat_age=Mat_agep,
# Wt_age=Wt_agep, V=Vp, retA=retAp,Perr=Perrp, mov=mov,
# SRrel=SRrel, Find=Find, Spat_targ=Spat_targ, hs=hs,
# R0a=R0a, SSBpR=SSBpR, aR=aR, bR=bR, SSB0=SSB0, B0=B0,
# MPA=noMPA, maxF=maxF)
# }
# MSYrefs
# }
# #' Apply output control recommendations and calculate population dynamics
# #'
# #' @param y Projection year
# #' @param Asize relative size of areas (matrix nsim by nareas)
# #' @param TACused TAC recommendation
# #' @param TAC_f Implementation error on TAC
# #' @param lastCatch Catch from last year
# #' @param availB Total available biomass
# #' @param maxF Maximum fishing mortality
# #' @param Biomass_P Numeric array (nsim, maxage, proyears, nareas) with Biomass at age
# #' @param VBiomass_P Numeric array (nsim, maxage, proyears, nareas) with Vulnerable Biomass at age
# #' @param CB_P Numeric array (nsim, maxage, proyears, nareas) with Catch Biomass at age
# #' @param CB_Pret Numeric array (nsim, maxage, proyears, nareas) with Retained catch biomass at age
# #' @param FM_P Numeric array (nsim, maxage, proyears, nareas) with fishing mortality at age
# #' @param Z_P Numeric array (nsim, maxage, proyears, nareas) with total mortality at age
# #' @param Spat_targ Spatial targetting
# #' @param V_P Numeric array(nsim, maxage, nyears+proyears) with vulnerability at age
# #' @param retA_P Numeric array(nsim, maxage, nyears+proyears) with retention at age
# #' @param M_ageArray Numeric array (nsim, maxage, nyears+proyears) Natural mortality at age
# #' @param qs Catchability coefficient
# #' @param nyears Number of historical years
# #' @param nsim Number of simulations
# #' @param maxage Maximum age
# #' @param nareas Number of areas
# #'
# # #' @export
# #'
# #' @author A. Hordyk
# #'
# CalcOutput <- function(y, Asize, TACused, TAC_f, lastCatch, availB, maxF, Biomass_P, VBiomass_P, CB_P, CB_Pret,
# FM_P, Z_P, Spat_targ, V_P, retA_P, M_ageArray, qs, nyears, nsim, maxage, nareas) {
# SAYRL <- as.matrix(expand.grid(1:nsim, 1:maxage, nyears, 1:nareas)) # Final historical year
# SAYRt <- as.matrix(expand.grid(1:nsim, 1:maxage, y + nyears, 1:nareas)) # Trajectory year
# SAYR <- as.matrix(expand.grid(1:nsim, 1:maxage, y, 1:nareas))
# SYt <- SAYRt[, c(1, 3)]
# SAYt <- SAYRt[, 1:3]
# SR <- SAYR[, c(1, 4)]
# SA1 <- SAYR[, 1:2]
# S1 <- SAYR[, 1]
# SY1 <- SAYR[, c(1, 3)]
# SAY1 <- SAYR[, 1:3]
# SYA <- as.matrix(expand.grid(1:nsim, 1, 1:maxage)) # Projection year
# SY <- SYA[, 1:2]
# SA <- SYA[, c(1, 3)]
# SAY <- SYA[, c(1, 3, 2)]
# S <- SYA[, 1]
#
# TACused[is.na(TACused)] <- lastCatch[is.na(TACused)] # if MP returns NA - TAC is set to catch from last year
#
# TACrec <- TACused # TAC recommendation
# TACusedE<- TAC_f[,y]*TACused # TAC taken after implementation error
#
# maxC <- (1 - exp(-maxF)) * availB # maximum catch given maxF
# TACusedE[TACusedE > maxC] <- maxC[TACusedE > maxC] # apply maxF limit - catch can't be higher than maxF * vulnerable biomass
#
# # fishdist <- (apply(VBiomass_P[, , y, ], c(1, 3), sum)^Spat_targ)/
# # apply(apply(VBiomass_P[, , y, ], c(1, 3), sum)^Spat_targ, 1, mean) # spatial preference according to spatial biomass
#
# fishdist <- (apply(VBiomass_P[, , y, ], c(1, 3), sum)^Spat_targ)/
# apply(apply(VBiomass_P[, , y, ], c(1, 3), sum)^Spat_targ, 1, sum) # spatial preference according to spatial biomass
#
#
#
# # If there is discard mortality, actual removals are higher than TACused
# # calculate distribution of all effort
# CB_P[SAYR] <- (Biomass_P[SAYR] * V_P[SAYt] * fishdist[SR])/Asize[SR] # ignore magnitude of effort or q increase (just get distribution across age and fishdist across space
# # calculate distribution of retained effort
# CB_Pret[SAYR] <- (Biomass_P[SAYR] * retA_P[SAYt] * fishdist[SR])/Asize[SR] # ignore magnitude of effort or q increase (just get distribution across age and fishdist across space
#
# retained <- apply(CB_Pret[,,y,], 1, sum)
# actualremovals <- apply(CB_P[,,y,], 1, sum)
#
# ratio <- actualremovals/retained # ratio of actual removals to retained catch
#
# temp <- CB_Pret[, , y, ]/apply(CB_Pret[, , y, ], 1, sum) # distribution of retained fish
# CB_Pret[, , y, ] <- TACusedE * temp # retained catch
#
# temp <- CB_P[, , y, ]/apply(CB_P[, , y, ], 1, sum) # distribution of removals
# CB_P[,,y,] <- TACusedE * ratio * temp # scale up total removals
#
# temp <- CB_P[SAYR]/(Biomass_P[SAYR] * exp(-M_ageArray[SAYt]/2)) # Pope's approximation
# temp[temp > (1 - exp(-maxF))] <- 1 - exp(-maxF)
#
# FM_P[SAYR] <- -log(1 - temp)
#
# # calcFs <- lapply(1:nsim, getFs, y=y, Vuln=V_P, CB=CB_P, Bio=Biomass_P, Mage=M_ageArray, Fdist=fishdist,
# # maxage=maxage, nareas=nareas, nyears=nyears) # numerically calculate Fs
# #
# #
# # FM_P[,,y,] <- aperm(array(unlist(calcFs, use.names=FALSE), dim=c(maxage, nareas, nsim)), c(3, 1, 2))
# # FM_P[,,y,][FM_P[,,y,] > (1-exp(-maxF))] <- 1 - exp(-maxF)
#
# Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt]
#
# Effort <- (-log(1 - apply(CB_P[, , y, ], 1, sum)/(apply(CB_P[, , y, ], 1, sum) +
# apply(VBiomass_P[, , y, ], 1, sum))))/qs
# out <- list()
# out$Z_P <- Z_P
# out$FM_P <- FM_P
# out$CB_P <- CB_P
# out$CB_Pret <- CB_Pret
# out$TACused <- TACused
# out$TACrec <- TACrec
# out$Effort <- Effort
# out
# }
# #' Internal function to calculate F-at-age given catch and biomass
# #'
# #' @param x Simulation
# #' @param y year
# #' @param Vuln Vulnerabilty
# #' @param CB Catch biomass
# #' @param Bio Biomass
# #' @param Mage M-at-age
# #' @param Fdist Fishing distribution
# #' @param maxage Maximum age
# #' @param nareas Number of areas
# #' @param nyears Number of historical years
# #' @keywords internal
# #'
# #' @export
# #'
# #' @author A. Hordyk
# getFs <- function(x, y, Vuln, CB, Bio, Mage, Fdist, maxage, nareas, nyears) {
#
# doopt <- optimize(optF, interval=log(c(0.01, 10)), Vuln[x,,nyears+y], CB[x,,y,],
# Bio[x,,y,], Mage[x,,y+nyears], Fdist[x,], maxage,nareas)
#
# ind <- as.matrix(expand.grid(x, 1:maxage, 1:nareas))
# ind2 <- as.matrix(expand.grid(1, 1:maxage, 1:nareas))
# FM <- array(NA, dim=c(1, maxage, nareas))
# FM[ind2] <- exp(doopt$minimum) * Vuln[ind] * Fdist[ind[,c(1,3)]]
# FM
# }
#
# #' Internal function to optimize for F
# #'
# #' @param fapic Apical fishing mortality
# #' @param vuln Vulnerability
# #' @param catch Catch
# #' @param bio Biomass
# #' @param mort Natural mortality
# #' @param fdist Fishing distribution
# #' @param maxage Maximum age
# #' @param nareas Number of areas
# #'
# #' @export
# #'
# #' @author A. Hordyk
# optF <- function(fapic, vuln, catch, bio, mort, fdist, maxage, nareas) {
# FM <- array(NA, dim=c(maxage, nareas))
# ind <- as.matrix(expand.grid(1:maxage, 1:nareas))
# FM[ind] <- exp(fapic) * vuln[ind[,1]] * fdist[ind[,2]]
#
# # FM[ind] <- (exp(fapic) * vuln[ind[,1]] * fdist[ind[,2]]) / area_size[ind[,2]]
#
# Z <- FM + mort
#
# pCatch <- FM/Z * bio* (1-exp(-Z))
# (log(sum(pCatch)) - log(sum(catch)))^2
#
# }
# #' Apply input control recommendations and calculate population dynamics
# #'
# #' Internal function
# #'
# #' @param y Simulation year
# #' @param Asize Matrix (nsim by nareas) with relative size of areas
# #' @param nyears Number of historical
# #' @param proyears Number of projection years
# #' @param InputRecs Input control recommendations
# #' @param nsim Number of simulations
# #' @param nareas Number of areas
# #' @param LR5_P Length at 5 percent retention
# #' @param LFR_P Length at full retention
# #' @param Rmaxlen_P Retention of maximum length
# #' @param maxage Maximum age
# #' @param retA_P Retention at age
# #' @param retL_P Retention at length
# #' @param V_P Realized vulnerability at age
# #' @param V2 Gear vulnerability at age
# #' @param pSLarray Realized vulnerability at length
# #' @param SLarray2 Gear vulnerability at length
# #' @param DR Discard ratio
# #' @param maxlen maximum length
# #' @param Len_age Length-at-age
# #' @param CAL_binsmid Length-bin mid-points
# #' @param Fdisc Fraction of discarded fish that die
# #' @param nCALbins Number of length bins
# #' @param E_f Implementation error on effort recommendation
# #' @param SizeLim_f Implementation error on size limit
# #' @param VBiomass_P Vulnerable biomass-at-age
# #' @param Biomass_P Biomass-at-age
# #' @param Spat_targ Spatial targetting
# #' @param FinF Final fishing effort
# #' @param qvar Annual ariability in catchability
# #' @param qs Catchability
# #' @param qinc Numeric vector (nsim) increased
# #' @param CB_P Numeric array (nsim, maxage, proyears, nareas) Catch biomass at age
# #' @param CB_Pret Numeric array (nsim, maxage, proyears, nareas) Retained catch biomass at age
# #' @param FM_P Numeric array (nsim, maxage, proyears, nareas) Fishing mortality at age
# #' @param FM_retain Numeric array (nsim, maxage, proyears, nareas) Retained fishing mortality at age
# #' @param Z_P Numeric array (nsim, maxage, proyears, nareas) Total mortality at age
# #' @param M_ageArray Numeric array (nsim, maxage, nyears+proyears) Natural mortality at age
# #' @param LastEffort Numeric vector (nsim) with fishing effort from last year
# #' @param LastSpatial Numeric matrix (nsim, nareas) with spatial closures from last year
# #' @param LastAllocat Numeric vector (nsim) with allocation from last year
# #'
# #' @keywords internal
# #' @export
# #'
# #' @author A. Hordyk
# #'
# CalcInput <- function(y, Linf, Asize, nyears, proyears, InputRecs, nsim, nareas, LR5_P, LFR_P,
# Rmaxlen_P, maxage, retA_P, retL_P, V_P, V2, pSLarray,
# SLarray2, DR, maxlen, Len_age, CAL_binsmid, Fdisc,
# nCALbins, E_f, SizeLim_f, VBiomass_P, Biomass_P, Spat_targ,
# FinF, qvar, qs, qinc, CB_P, CB_Pret, FM_P, FM_retain, Z_P,
# M_ageArray, LastEffort, LastSpatial, LastAllocat) {
#
# SAYRL <- as.matrix(expand.grid(1:nsim, 1:maxage, nyears, 1:nareas)) # Final historical year
# SAYRt <- as.matrix(expand.grid(1:nsim, 1:maxage, y + nyears, 1:nareas)) # Trajectory year
# SAYR <- as.matrix(expand.grid(1:nsim, 1:maxage, y, 1:nareas))
# SYt <- SAYRt[, c(1, 3)]
# SAYt <- SAYRt[, 1:3]
# SR <- SAYR[, c(1, 4)]
# SA1 <- SAYR[, 1:2]
# S1 <- SAYR[, 1]
# SY1 <- SAYR[, c(1, 3)]
# SAY1 <- SAYR[, 1:3]
# SYA <- as.matrix(expand.grid(1:nsim, 1, 1:maxage)) # Projection year
# SY <- SYA[, 1:2]
# SA <- SYA[, c(1, 3)]
# SAY <- SYA[, c(1, 3, 2)]
# S <- SYA[, 1]
#
# # Change in Effort
# if (length(InputRecs$Effort) == 0) { # no effort recommendation
# if (y==1) Ei <- LastEffort * E_f[,y] # effort is unchanged but has implementation error
# if (y>1) Ei <- LastEffort / E_f[,y-1] * E_f[,y] # effort is unchanged but has implementation error
# } else if (length(InputRecs$Effort) != nsim) {
# stop("Effort recommmendation is not 'nsim' long.\n Does MP return Effort recommendation under all conditions?")
# } else {
# Ei <- InputRecs$Effort * E_f[,y] # effort adjustment with implementation error
# }
#
# # Spatial
# if (all(is.na(InputRecs$Spatial))) { # no spatial recommendation
# Si <- LastSpatial # matrix(1, nsim, nareas) # spatial is unchanged - modify this if spatial closure in historical years
# } else if (any(is.na(InputRecs$Spatial))) {
# stop("Spatial recommmendation has some NAs.\n Does MP return Spatial recommendation under all conditions?")
# } else {
# Si <-InputRecs$Spatial # change spatial fishing
# }
#
# # Allocation
# if (length(InputRecs$Allocate) == 0) { # no allocation recommendation
# Ai <- LastAllocat # rep(0, nsim) # allocation is unchanged
# } else if (length(InputRecs$Allocate) != nsim) {
# stop("Allocate recommmendation is not 'nsim' long.\n Does MP return Allocate recommendation under all conditions?")
# } else {
# Ai <- InputRecs$Allocate # change in spatial allocation
# }
# # Retention Curve
# RetentFlag <- FALSE
# # LR5
# if (length(InputRecs$LR5) == 0) { # no recommendation
# LR5_P[(y + nyears):(nyears+proyears),] <- matrix(LR5_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
#
# } else if (length(InputRecs$LR5) != nsim) {
# stop("LR5 recommmendation is not 'nsim' long.\n Does MP return LR5 recommendation under all conditions?")
# } else {
# LR5_P[(y + nyears):(nyears+proyears),] <- matrix(InputRecs$LR5 * SizeLim_f[,y],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation with implementation error
# RetentFlag <- TRUE
# }
# # LFR
# if (length(InputRecs$LFR) == 0) { # no recommendation
# LFR_P[(y + nyears):(nyears+proyears),] <- matrix(LFR_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
# } else if (length(InputRecs$LFR) != nsim) {
# stop("LFR recommmendation is not 'nsim' long.\n Does MP return LFR recommendation under all conditions?")
# } else {
# LFR_P[(y + nyears):(nyears+proyears),] <- matrix(InputRecs$LFR * SizeLim_f[,y],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation with implementation error
# RetentFlag <- TRUE
# }
# # Rmaxlen
# if (length(InputRecs$Rmaxlen) == 0) { # no recommendation
# Rmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(Rmaxlen_P[y + nyears-1,],
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # unchanged
#
# } else if (length(Rmaxlen) != nsim) {
# stop("Rmaxlen recommmendation is not 'nsim' long.\n Does MP return Rmaxlen recommendation under all conditions?")
# } else {
# Rmaxlen_P[(y + nyears):(nyears+proyears),] <- matrix(InputRecs$Rmaxlen,
# nrow=(length((y + nyears):(nyears+proyears))),
# ncol=nsim, byrow=TRUE) # recommendation
# RetentFlag <- TRUE
# }
# # HS - harvest slot
#
# if (length(InputRecs$HS) == 0) { # no recommendation
# HS <- rep(1E5, nsim) # no harvest slot
# } else if (length(InputRecs$HS) != nsim) {
# stop("HS recommmendation is not 'nsim' long.\n Does MP return HS recommendation under all conditions?")
# } else {
# HS <- InputRecs$HS * SizeLim_f[,y] # recommendation
# RetentFlag <- TRUE
# }
# # Change in retention - update vulnerability and retention curves
# if (RetentFlag) {
# yr <- y+nyears
# allyrs <- (y+nyears):(nyears+proyears) # update vulnerabilty for all future years
#
# srs <- (Linf - LFR_P[yr,]) / ((-log(Rmaxlen_P[yr,],2))^0.5) # selectivity parameters are constant for all years
# sls <- (LFR_P[yr,] - LR5_P[yr,]) / ((-log(0.05,2))^0.5)
#
# CAL_binsmidMat <- matrix(CAL_binsmid, nrow=nsim, ncol=length(CAL_binsmid), byrow=TRUE)
# relLen <- t(sapply(1:nsim, getsel, lens=CAL_binsmidMat, lfs=LFR_P[yr,], sls=sls, srs=srs))
#
# for (yy in allyrs) {
# # calculate new retention at age curve
# retA_P[ , , yy] <- t(sapply(1:nsim, getsel, lens=Len_age[,,yy], lfs=LFR_P[yy,], sls=sls, srs=srs))
#
# # calculate new retention at length curve
# retL_P[,, yy] <- relLen
# }
#
# # upper harvest slot
# aboveHS <- Len_age[,,allyrs]>HS
# tretA_P <- retA_P[,,allyrs]
# tretA_P[aboveHS] <- 0
# retA_P[,,allyrs] <- tretA_P
# for (ss in 1:nsim) {
# index <- which(CAL_binsmid >= HS[ss])
# retL_P[ss, index, allyrs] <- 0
# }
#
# dr <- aperm(abind::abind(rep(list(DR), maxage), along=3), c(2,3,1))
# retA_P[,,allyrs] <- (1-dr[,,yr]) * retA_P[,,yr]
# dr <- aperm(abind::abind(rep(list(DR), nCALbins), along=3), c(2,3,1))
# retL_P[,,allyrs] <- (1-dr[,,yr]) * retL_P[,,yr]
#
# # update realized vulnerablity curve with retention and dead discarded fish
# Fdisc_array1 <- array(Fdisc, dim=c(nsim, maxage, length(allyrs)))
#
# V_P[,,allyrs] <- V2[,,allyrs] * (retA_P[,,allyrs] + (1-retA_P[,,allyrs])*Fdisc_array1)
#
# Fdisc_array2 <- array(Fdisc, dim=c(nsim, nCALbins, length(allyrs)))
# pSLarray[,,allyrs] <- SLarray2[,,allyrs] * (retL_P[,,allyrs]+ (1-retL_P[,,allyrs])*Fdisc_array2)
#
# # Realised Retention curves
# retA_P[,,allyrs] <- retA_P[,,allyrs] * V_P[,,allyrs]
# retL_P[,,allyrs] <- retL_P[,,allyrs] * pSLarray[,,allyrs]
#
# }
#
#
# newVB <- apply(Biomass_P[, , y, ] * V_P[SAYt], c(1, 3), sum) # calculate total vuln biomass by area
# # fishdist <- (newVB^Spat_targ)/apply(newVB^Spat_targ, 1, mean) # spatial preference according to spatial vulnerable biomass
# fishdist <- (newVB^Spat_targ)/apply(newVB^Spat_targ, 1, sum) # spatial preference according to spatial vulnerable biomass
# Emult <- 1 + ((2/apply(fishdist * Si, 1, sum)) - 1) * Ai # allocate effort to new area according to fraction allocation Ai
#
# # fishing mortality with input control recommendation
# FM_P[SAYR] <- (FinF[S1] * Ei[S1] * V_P[SAYt] * Si[SR] * fishdist[SR] * Emult[S1] * qvar[SY1] * (qs[S1]*(1 + qinc[S1]/100)^y))/Asize[SR]
#
# # retained fishing mortality with input control recommendation
# FM_retain[SAYR] <- (FinF[S1] * Ei[S1] * retA_P[SAYt] * Si[SR] * fishdist[SR] * Emult[S1] * qvar[SY1] * qs[S1]*(1 + qinc[S1]/100)^y)/Asize[SR]
#
# VBiomass_P[SAYR] <- Biomass_P[SAYR] * V_P[SAYt] # update vulnerable biomass
# Z_P[SAYR] <- FM_P[SAYR] + M_ageArray[SAYt] # calculate total mortality
#
# CB_P[SAYR] <- FM_P[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
# CB_Pret[SAYR] <- FM_retain[SAYR]/Z_P[SAYR] * Biomass_P[SAYR] * (1 - exp(-Z_P[SAYR]))
#
# out <- list()
# out$Z_P <- Z_P
# out$FM_P <- FM_P
# out$FM_retain <- FM_retain
# out$CB_P <- CB_P
# out$CB_Pret <- CB_Pret
# out$Effort <- Ei
# out$retA_P <- retA_P
# out$retL_P <- retL_P
# out$V_P <- V_P
# out$pSLarray <- pSLarray
# out$Si <- Si
# out$Ai <- Ai
# out
#
# }
|
# Auto-generated fuzzing regression case (AFL/valgrind harness) for
# multivariance:::match_rows: feeds a 5x7 matrix containing extreme double
# values alongside a 1x1 zero matrix -- presumably an input that previously
# triggered a memory issue; confirm against the package's test provenance.
testlist <- list(A = structure(c(2.31584307392677e+77, 5.17065910579704e+276, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613098850-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 343 | r | testlist <- list(A = structure(c(2.31584307392677e+77, 5.17065910579704e+276, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
#' Rsquare
#'
#' R squared (coefficient of determination)
#'
#' @details Formula used: \code{\link{cor}(a,b)^2}.
#' \code{NA}s are removed pairwise before computing the correlation.
#'
#' @return Numeric. \code{NA} if either vector is all zeros.
#' @note Using cor is much faster than using\cr \code{ aa <- a-mean(a); bb <- b-mean(b); sum(aa*bb)^2/sum(aa^2)/sum(bb^2)}
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, 2014
#' @seealso \code{\link{rmse}}, \code{\link{cor}}, \code{\link{lm}}
#' @references \url{https://en.wikipedia.org/wiki/R-squared}
#' @keywords univar
#' @export
#' @examples
#'
#' x <- rnorm(20)
#' y <- 2*x + rnorm(20)
#' plot(x,y)
#' rsquare(x,y)
#'
#' r2 <- sapply(1:10000, function(i){
#' x <- rnorm(20); y <- 2*x + rnorm(20); rsquare(x,y) })
#' hist(r2, breaks=70, col=5,
#' main= "10'000 times x <- rnorm(20); y <- 2*x + rnorm(20); rsquare(x,y)")
#'
#' @param a Vector with values.
#' @param b Another vector of the same length.
#' @param quiet Should NA-removal warnings be suppressed? Helpful within functions. DEFAULT: FALSE
#'
rsquare <- function(
a,
b,
quiet=FALSE)
{
# input checks: both arguments must be plain vectors of the same length
# (scalar conditions, so use the short-circuiting && rather than elementwise &)
if(!(is.vector(a) && is.vector(b))) stop("input is not vectors")
if(length(a) != length(b)) stop("vectors not of equal length")
# pairwise removal of missing values; compute the NA mask only once
miss <- is.na(a) | is.na(b)
if(any(miss))
  {
  if(!quiet) warning(sum(miss), " NAs were omitted from ", length(a), " data points.")
  a <- a[!miss] ; b <- b[!miss]
  } # end if NA
# cor() is undefined for zero-variance input; all-zero vectors yield NA
if(all(a==0) || all(b==0))
  {
  if(!quiet) warning("all a (or all b) values are zero, returning NA.")
  return(NA)
  } # end if zero
cor(a,b)^2
}
# Disabled scratch block (if(FALSE)): benchmark comparing rsquare() against a
# manual covariance-based implementation. Kept for reference only; never
# executed when the file is sourced.
if(FALSE)
{
# alternative, slower (3.4 instead of 2.1 seconds in the example below)
# crucial, if calculations are done iteratively or performed multiple times
rsquare2 <- function(a,b) {
if(!(is.vector(a) & is.vector(b))) stop("input is not vectors")
if(length(a) != length(b)) stop("vectors not of equal length")
if(any(is.na(a)|is.na(b)))
{ warning("NAs were omitted")
Na <- which(is.na(a)|is.na(b))
a <- a[-Na] ; b <- b[-Na]
} # end if NA
aa <- a-mean(a)
bb <- b-mean(b)
sum(aa*bb)^2/sum(aa^2)/sum(bb^2) }
# timing comparison on 1e8 values (slow; needs several GB of RAM)
a <- sort(rnorm(1e8)); b <- 2*a+3+rnorm(length(a))
system.time(rsquare(a,b))
system.time(rsquare2(a,b))
}
| /berryFunctions/R/rsquare.R | no_license | ingted/R-Examples | R | false | false | 2,221 | r | #' Rsquare
#'
#' R squared (coefficient of determination)
#'
#' @details Formula used: \code{\link{cor}(a,b)^2}
#'
#' @return Numeric.
#' @note Using cor is much faster than using\cr \code{ aa <- a-mean(a); bb <- b-mean(b); sum(aa*bb)^2/sum(aa^2)/sum(bb^2)}
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, 2014
#' @seealso \code{\link{rmse}}, \code{\link{cor}}, \code{\link{lm}}
#' @references \url{http://en.wikipedia.org/wiki/R-squared}
#' @keywords univar
#' @export
#' @examples
#'
#' x <- rnorm(20)
#' y <- 2*x + rnorm(20)
#' plot(x,y)
#' rsquare(x,y)
#'
#' r2 <- sapply(1:10000, function(i){
#' x <- rnorm(20); y <- 2*x + rnorm(20); rsquare(x,y) })
#' hist(r2, breaks=70, col=5,
#' main= "10'000 times x <- rnorm(20); y <- 2*x + rnorm(20); rsquare(x,y)")
#'
#' @param a Vector with values.
#' @param b Another vector of the same length.
#' @param quiet Should NA-removal warnings be suppressed? Helpful within functions. DEFAULT: FALSE
#'
rsquare <- function(
a,
b,
quiet=FALSE)
{
# Coefficient of determination: squared Pearson correlation of two vectors.
# NAs are removed pairwise; all-zero input returns NA.
# input checks: both arguments must be plain vectors of the same length
# (scalar conditions, so use the short-circuiting && rather than elementwise &)
if(!(is.vector(a) && is.vector(b))) stop("input is not vectors")
if(length(a) != length(b)) stop("vectors not of equal length")
# pairwise removal of missing values; compute the NA mask only once
miss <- is.na(a) | is.na(b)
if(any(miss))
  {
  if(!quiet) warning(sum(miss), " NAs were omitted from ", length(a), " data points.")
  a <- a[!miss] ; b <- b[!miss]
  } # end if NA
# cor() is undefined for zero-variance input; all-zero vectors yield NA
if(all(a==0) || all(b==0))
  {
  if(!quiet) warning("all a (or all b) values are zero, returning NA.")
  return(NA)
  } # end if zero
cor(a,b)^2
}
# Disabled scratch block (if(FALSE)): benchmark comparing rsquare() against a
# manual covariance-based implementation. Kept for reference only; never
# executed when the file is sourced.
if(FALSE)
{
# alternative, slower (3.4 instead of 2.1 seconds in the example below)
# crucial, if calculations are done iteratively or performed multiple times
rsquare2 <- function(a,b) {
if(!(is.vector(a) & is.vector(b))) stop("input is not vectors")
if(length(a) != length(b)) stop("vectors not of equal length")
if(any(is.na(a)|is.na(b)))
{ warning("NAs were omitted")
Na <- which(is.na(a)|is.na(b))
a <- a[-Na] ; b <- b[-Na]
} # end if NA
aa <- a-mean(a)
bb <- b-mean(b)
sum(aa*bb)^2/sum(aa^2)/sum(bb^2) }
# timing comparison on 1e8 values (slow; needs several GB of RAM)
a <- sort(rnorm(1e8)); b <- 2*a+3+rnorm(length(a))
system.time(rsquare(a,b))
system.time(rsquare2(a,b))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.autoscaling_operations.R
\name{describe_load_balancer_target_groups}
\alias{describe_load_balancer_target_groups}
\title{Describes the target groups for the specified Auto Scaling group}
\usage{
describe_load_balancer_target_groups(AutoScalingGroupName,
NextToken = NULL, MaxRecords = NULL)
}
\arguments{
\item{AutoScalingGroupName}{[required] The name of the Auto Scaling group.}
\item{NextToken}{The token for the next set of items to return. (You received this token from a previous call.)}
\item{MaxRecords}{The maximum number of items to return with this call. The default value is 100 and the maximum value is 100.}
}
\description{
Describes the target groups for the specified Auto Scaling group.
}
\section{Accepted Parameters}{
\preformatted{describe_load_balancer_target_groups(
AutoScalingGroupName = "string",
NextToken = "string",
MaxRecords = 123
)
}
}
\examples{
# This example describes the target groups attached to the specified Auto
# Scaling group.
\donttest{describe_load_balancer_target_groups(
AutoScalingGroupName = "my-auto-scaling-group"
)}
}
| /service/paws.autoscaling/man/describe_load_balancer_target_groups.Rd | permissive | CR-Mercado/paws | R | false | true | 1,166 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.autoscaling_operations.R
\name{describe_load_balancer_target_groups}
\alias{describe_load_balancer_target_groups}
\title{Describes the target groups for the specified Auto Scaling group}
\usage{
describe_load_balancer_target_groups(AutoScalingGroupName,
NextToken = NULL, MaxRecords = NULL)
}
\arguments{
\item{AutoScalingGroupName}{[required] The name of the Auto Scaling group.}
\item{NextToken}{The token for the next set of items to return. (You received this token from a previous call.)}
\item{MaxRecords}{The maximum number of items to return with this call. The default value is 100 and the maximum value is 100.}
}
\description{
Describes the target groups for the specified Auto Scaling group.
}
\section{Accepted Parameters}{
\preformatted{describe_load_balancer_target_groups(
AutoScalingGroupName = "string",
NextToken = "string",
MaxRecords = 123
)
}
}
\examples{
# This example describes the target groups attached to the specified Auto
# Scaling group.
\donttest{describe_load_balancer_target_groups(
AutoScalingGroupName = "my-auto-scaling-group"
)}
}
|
# Select samples for Whole Exome Sequencing.
# Selection criteria (in rough priority order):
# multiplex families
# affecteds
# most severe clinical course
# earliest onset
# good DNA (ie whole blood, good ABS ratio, non-extreme concentration)
# have HumanOmni1-Quad data
# part of trios favored over duos
# HPV typing available favored over not
library(ggplot2)
# 'affected', 'all.subj.byspec' and 'rpinfinwrk.dt' are expected to be
# data.table objects created by the sourced script below -- confirm there.
source(file="Multiplex family germline.r")# brings in nuc.ac.multiplex which is nuc.ac.nr of all of those in the multiplex family
mendel.error.fam <- c("BJW18006", "DET10003", "FJB01117", "FKK24002", "GHP04013", "JEM04008", "JWT08002", "PXC08007")#families that were deleted from the TDTae algorithm "delete 8 families with highest Mendel errors from the dataset", https://mail.google.com/mail/u/0/#apps/subject%3A(Some+additional+details+on+the+plink+run+)/n25/1358bc48a0e710bb
# Expressivity ranking of affected patients: combines onset age, wart
# frequency, distal spread, tracheostomy and surgery count into one score
# (higher rank.express = earlier onset / more aggressive clinical course).
express.rank <- affected[,list(PtCode, dxage, rankage = rankage <- frank(dxage), aggr.max.freq, rank.max.freq = rank.max.freq <- rank(-aggr.max.freq), avg.annual.frq, rank.annual.freq = rank.annual.freq <- rank(-avg.annual.frq), aggr.distal, rank.distal = rank.distal <- rank(aggr.distal), aggr.tracheostomy, rank.tracheost = rank.tracheost <- rank(aggr.tracheostomy),aggr.surgcount, rank.surgcount = rank.surgcount <- rank(-aggr.surgcount),hpv, rank.express=(aggr.max.freq*(1+avg.annual.frq)*(1+aggr.surgcount)*rank.tracheost*rank.distal)/(dxage))]#rank.express provides a rank (higher is more expressive) of the expressivity of any susceptibility. They are people who were diagnosed the youngest and had the most aggressive clinical course. Added a 1 to avg.annual.frq and aggr.surgcount when calculating rank.express since some zeros messed with the multiplication.
# rank.express for this patient is blanked -- presumably a known data
# problem; confirm against the study records.
express.rank[PtCode=="RYS16028PT", rank.express:=NA]
# spot-check: every 10th row of the top half, ordered by decreasing expressivity
express.rank[order(-rank.express)][seq(from=1, to=0.5*.N, by = 10 )]
express.rank[is.na(rank.express), rank.express:=signif(quantile(express.rank$rank.express, na.rm = TRUE, probs = 0.1), digits = 2)]# some ranks are NA because there was just one piece of missing data. I therefore assigned all of them to have the 10th percentile rank.express score
express.rank[rank.express==0]
# DNA-quality ranking of whole-blood (WB) stocks: favors a good absorbance
# ratio and a yield within 1 SD of the mean log yield or above.
DNA.rank.WB <- all.subj.byspec[TissCode=="WB",list(PtCode, relationship, nuc.ac.nr,dnaconcngpmicl, absratio, absratiolow, rank.absratio=rank.absratio <- rank(absratiolow), dnayieldmicrog, yieldlow = yieldlow <- ifelse(-scale(log(dnayieldmicrog))<1,1,-scale(log(dnayieldmicrog))), dna.rank=1/(yieldlow*(0.1+absratiolow)), TissCode)] #DNA.rank provides a rank (higher number is more favorable) of our DNA stock solutions. We chose the dna stock yield that was within 1 sd of the log dnayield or above.
all.subj.byspec[TissCode=="WB",list(dnayieldmicrog, ifelse(-scale(log(dnayieldmicrog))<1,1,-scale(log(dnayieldmicrog))))]
DNA.rank.WB[order(-dna.rank)][seq(from=1, to=.N, by = 15 )]
# DNA-quality ranking of mouthwash (MW) stocks: here the highest absolute
# yield is favored rather than yields near the average.
DNA.rank.MW <- all.subj.byspec[TissCode=="MW",list(PtCode, relationship, nuc.ac.nr,dnaconcngpmicl, absratio, absratiolow, rank.absratio=rank.absratio <- rank(absratiolow), dnayieldmicrog, dna.rank=dnayieldmicrog/(0.1+absratiolow), TissCode)] #DNA.rank provides a rank (higher number is more favorable) of our DNA stock solutions. For MW it is calculated differently. Here we do not favor the average yield but rather the highest yield.
DNA.rank.MW[dnayieldmicrog<=0, dna.rank:=0.0001]# non-positive yields get a tiny non-zero rank so downstream arithmetic works
DNA.rank.MW[order(-dna.rank)][seq(from=1, to=.N, by = 15 )]
# Did DNA yield from mouthwash (MW) extractions improve over time?
ggplot(all.subj.byspec[TissCode=="MW"], aes(x=DNAextract.date, y=log2(dnayieldmicrog))) + geom_point() + stat_smooth(method = "lm")
summary(lm(log2(dnayieldmicrog)~DNAextract.date, data =all.subj.byspec[TissCode=="MW"]))
# library(BayesianFirstAid)
# fit2 <- bayes.t.test(log2(dnayieldmicrog)~DNAextract.date>as.IDate("2009-01-01"), data=all.subj.byspec[TissCode=="MW"])
# summary(fit2)
# plot(fit2)
# frequentist comparison of pre- vs post-2009 extraction yields
t.test(log2(dnayieldmicrog)~DNAextract.date>as.IDate("2009-01-01"), data=all.subj.byspec[TissCode=="MW"])
ggplot(data=all.subj.byspec[TissCode=="MW"], aes(x=DNAextract.date>as.IDate("2009-01-01"), y=log2(dnayieldmicrog))) + geom_boxplot()
mw.stock <- subset(all.subj.byspec, TissCode=="MW", c(dnayieldmicrog,DNAextract.date) )
mw.stock[, extract.era:=ifelse(DNAextract.date>as.IDate("2009-01-01"), "since2009", "before2009")]
ggplot(data=mw.stock, aes(x=log2(dnayieldmicrog))) + geom_histogram(binwidth = 0.8) + facet_grid(extract.era~.) + aes(y = ..density..)
# Combine WB and MW rankings; weight by tissue type (WB preferred over MW).
l.all.germ <- list(DNA.rank.WB, DNA.rank.MW)
DNA.rank <- rbindlist(l.all.germ,use.names=TRUE, fill=TRUE)
DNA.rank[, dna.rank:=dna.rank*(rank(TissCode)^2)]#dna.rank therefore accounts for best ratio and good yield and the type of tissue that it was extracted from where WB is better than mouthwash
DNA.rank[order(-dna.rank)][seq(from=1, to=.N, by = 25 )]
ggplot(all.subj.byspec[TissCode!="BUC"], aes(x=log10(dnayieldmicrog))) + geom_histogram(binwidth=0.25) + facet_grid(TissCode~.) + aes(y = ..density..) + ggtitle("Yield of DNA from each tissue type")
illumina.omni <- rpinfinwrk.dt[day2blue=="Y",nuc.ac.nr]# nuc.ac.nr of all specimens that we have illumina.omni data on
# Merge expressivity and DNA-quality rankings on PtCode, keeping only the
# affected patients' own DNA specimens (not relatives').
setkey(DNA.rank, PtCode)
setkey(express.rank, PtCode)
ordered.exome <- merge(express.rank, DNA.rank[relationship=="PT",.SD, .SDcols=-relationship])#do not want the DNA specimens from relations getting in here therefore filter for affected patients only
ordered.exome[,c("rankage", "rank.max.freq", "rank.annual.freq", "rank.distal", "rank.tracheost", "rank.surgcount", "absratiolow", "rank.absratio"):= NULL]# drop intermediate ranking columns
# get a field in for illumina genotyping
ordered.exome[, illumina:="noillumina"]
ordered.exome[nuc.ac.nr %in% illumina.omni, illumina:="haveillumina"]
ordered.exome[, rank.illumina:=rank(illumina)]#lower number is better so it should go in the denominator
# get a field in for trio vs duo vs solo
fam.dt <- all.subj.byspec[,list(father=any(relationship=="FA"), mother=any(relationship=="MO")),by=family]# fam.dt is a data.table to put the family status in one table
fam.dt[, PtCode:=paste0(family,"PT")]
fam.dt[, trio.duo.sol:=factor(1+father+mother, labels = c("solo", "duo", "trio"))]
setkey(fam.dt,PtCode)
ordered.exome[fam.dt, trio.duo.sol:= i.trio.duo.sol]# read about this at data.table join then add columns to existing data.frame without re-copy at http://stackoverflow.com/questions/19553005/data-table-join-then-add-columns-to-existing-data-frame-without-re-copy
# add a score for HPV type to multiply to numerator to find best affecteds to send for whole exome sequencing.
ordered.exome[,rank.hpv:=1]
ordered.exome[!is.na(hpv), rank.hpv:=2]
ordered.exome[hpv==6|hpv==11, rank.hpv:=4]
ordered.exome[,rank.exome := dna.rank^2*rank.express*as.numeric(trio.duo.sol)*rank.hpv/rank.illumina^2]
ordered.exome[is.na(rank.exome), rank.exome:=signif(quantile(ordered.exome[,rank.exome], na.rm = TRUE, probs = 0.1), digits = 2)]# some ranks are NA because there was just one piece of missing data. I therefore assigned all of them to have the 10th percentile rank.exome score
# Generate list of suitable samples-------------------------
exome.list <- ordered.exome[!nuc.ac.nr %in% nuc.ac.multiplex][sample(.N,22, replace=FALSE, prob=rank.exome)][order(-rank.exome)]#gives us random sample weighted for most suitable in descending order of suitability
nuc.ac.exome.list <- exome.list[,nuc.ac.nr]
#how about some adult onset since it may be a different disease
exome.ao <- ordered.exome[dxage>18][sample(.N,8, replace=FALSE, prob=(rank.exome))][order(-rank.exome)]
nuc.ac.exome.ao <- exome.ao[,nuc.ac.nr]
#how about some sets of parents from trios but not with mendelian errors
# consulted geneticist will not do trio parents at this time Wednesday, 15 Apr 2015 15:01
exome.trio <- exome.list[!PtCode %chin% paste0(mendel.error.fam,"PT")&trio.duo.sol=="trio"&TissCode=="WB"][sample(.N, 0, replace=FALSE, prob=rank.express),PtCode]
exome.trio <- str_sub(exome.trio,end=8L)#to convert the names from PtCode to trio name
exome.parents <- DNA.rank.WB[str_sub(PtCode, end=8L) %chin% exome.trio&relationship %chin% c("FA", "MO")]
DNA.rank.WB[relationship %chin% c("FA", "MO")]
nuc.ac.exome.parents <- exome.parents[,nuc.ac.nr]
#how about some children with mild disease
# instead of weighting by rank.exome, we will weight by rank.exome divided by square of expressivitiy
# set a limit that their age of diagnosis must be <12
exome.indolent <- ordered.exome[!nuc.ac.nr %in% nuc.ac.multiplex&dxage<12&aggr.max.freq<4&aggr.surgcount<10&aggr.distal=="cleardist"&aggr.tracheostomy=="Never"&!(is.na(aggr.surgcount)|is.na(aggr.max.freq)|is.na(avg.annual.frq)|is.na(aggr.distal)|is.na(aggr.tracheostomy)|is.na(dxage))][sample(.N,8, replace=FALSE, prob=rank.exome/rank.express^2)][order(-rank.exome)]#gives us random sample weighted for most suitable in descending order of suitability, Eliminated rank
summary(ordered.exome$rank.express)
nuc.ac.exome.indolent <- exome.indolent[,nuc.ac.nr]
nuc.ac.for.exome <- unique(c(nuc.ac.multiplex, nuc.ac.exome.list, nuc.ac.exome.ao, nuc.ac.exome.parents, nuc.ac.exome.indolent))
length(nuc.ac.for.exome)
#save(nuc.ac.for.exome, file = "nucleic acid whole exome.RData")
multip.n <- length(nuc.ac.multiplex)
high.penetrance.n <- length(nuc.ac.exome.list)
ao.n <- length(nuc.ac.exome.ao)
parents <- length(nuc.ac.exome.parents)
indolent <- length(nuc.ac.exome.indolent)
sum(multip.n, high.penetrance.n, ao.n, parents, indolent)
# find the samples for evan----------------
load(file="plating data minus 106.RData")
setkey(plating,nuc.ac.nr)
gofind <- plating[nuc.ac.nr %in% nuc.ac.for.exome,list(nuc.ac.nr,plate,well)][order(plate,well)]#samples in the big agena send out
gofind.106 <- setdiff(nuc.ac.for.exome, plating$nuc.ac.nr)#samples that were sent out in the initial 106
go.find.all.exome <- rbindlist(list(gofind, data.table(nuc.ac.nr=gofind.106)), fill=TRUE, use.names=TRUE)
write.csv(go.find.all.exome, file = "go find all exome.csv", row.names = FALSE)
# problem specimens notified 2015-04-22--------------
# http://gsl.hudsonalpha.org/projects/haib15FJB3120/view_project
problem.sampl <- data.table(hdslph=c(2, 1, 30, 17, 8, 6), nuc.ac.nr=c(760, 736, 600, 793, 759, 384), problem=c("low quantity", "low quantity", "degraded", "degraded", "low quantity", "degraded"), key="nuc.ac.nr")
problem.but.multiplex <- intersect(problem.sampl$nuc.ac.nr, nuc.ac.multiplex)
problem.sampl[J(problem.but.multiplex), resolve:="continue because valuable multiplex"]
setkey(ordered.exome,nuc.ac.nr)
ordered.exome[problem.sampl]# appears as if nuc.ac.nr 600 was selected because it came from a an affected person with a high expressivity of the disease and their absorbance ratio and yield of DNA was good notwithstanding that it was from a mouthwash specimen. Let us replace it
problem.sampl[J(c(600, 384)),resolve:="replace with another sample from high expressivity"]
load("nucleic acid whole exome.RData")# do this so we get the actual original specimens sent
exome.list <- ordered.exome[!nuc.ac.nr %in% c(nuc.ac.multiplex, nuc.ac.for.exome) ][sample(.N,1, replace=FALSE, prob=rank.exome)][order(-rank.exome)]#gives us random sample weighted for most suitable in descending order of suitability
#nuc.ac.exome.substitute <- c(exome.list[,nuc.ac.nr], nuc.ac.exome.substitute)
#save(nuc.ac.exome.substitute, file = "nucleic acid whole exome substitute.RData")
| /exome sequence sample selection.r | no_license | FarrelBuch/gene_analysis | R | false | false | 11,237 | r | # Select samples for Whole Exome Sequencing
# multiplex families
# affecteds
# most severe clinical course
# earliest onset
# good DNA (ie whole blood, good ABS ratio, non-extreme concentration)
# have HumanOmni1-Quad data
# part of trios favored over duos
# HPV typing available favored over not
library(ggplot2)
source(file="Multiplex family germline.r")# brings in nuc.ac.multiplex which is nuc.ac.nr of all the those in the multiplex family
mendel.error.fam <- c("BJW18006", "DET10003", "FJB01117", "FKK24002", "GHP04013", "JEM04008", "JWT08002", "PXC08007")#families that were deleted from the TDTae alogrithm "delete 8 families with highest Mendel errors from the dataset", https://mail.google.com/mail/u/0/#apps/subject%3A(Some+additional+details+on+the+plink+run+)/n25/1358bc48a0e710bb
express.rank <- affected[,list(PtCode, dxage, rankage = rankage <- frank(dxage), aggr.max.freq, rank.max.freq = rank.max.freq <- rank(-aggr.max.freq), avg.annual.frq, rank.annual.freq = rank.annual.freq <- rank(-avg.annual.frq), aggr.distal, rank.distal = rank.distal <- rank(aggr.distal), aggr.tracheostomy, rank.tracheost = rank.tracheost <- rank(aggr.tracheostomy),aggr.surgcount, rank.surgcount = rank.surgcount <- rank(-aggr.surgcount),hpv, rank.express=(aggr.max.freq*(1+avg.annual.frq)*(1+aggr.surgcount)*rank.tracheost*rank.distal)/(dxage))]#rank.express provides a rank (higher is more expressive) of the expressiviity of any susceptibility. They are people who were diagnosed the youngest and had the most aggressive clinical course. Added a 1 to avg.annual.frq and aggr.surgcount when calculating rank.express since some zeros messed with the multiplication.
express.rank[PtCode=="RYS16028PT", rank.express:=NA]
express.rank[order(-rank.express)][seq(from=1, to=0.5*.N, by = 10 )]
express.rank[is.na(rank.express), rank.express:=signif(quantile(express.rank$rank.express, na.rm = TRUE, probs = 0.1), digits = 2)]# some ranks are NA because there was just one piece of missing data. I therefore assigned all of them to have the 10th percentile rank.exome score
express.rank[rank.express==0]
DNA.rank.WB <- all.subj.byspec[TissCode=="WB",list(PtCode, relationship, nuc.ac.nr,dnaconcngpmicl, absratio, absratiolow, rank.absratio=rank.absratio <- rank(absratiolow), dnayieldmicrog, yieldlow = yieldlow <- ifelse(-scale(log(dnayieldmicrog))<1,1,-scale(log(dnayieldmicrog))), dna.rank=1/(yieldlow*(0.1+absratiolow)), TissCode)] #DNA.rank provides a rank (higher number is more favorable) of our DNA stock solutions. We chose the dna stock yield that was within 1 sd of the log dnayield or above.
all.subj.byspec[TissCode=="WB",list(dnayieldmicrog, ifelse(-scale(log(dnayieldmicrog))<1,1,-scale(log(dnayieldmicrog))))]
DNA.rank.WB[order(-dna.rank)][seq(from=1, to=.N, by = 15 )]
DNA.rank.MW <- all.subj.byspec[TissCode=="MW",list(PtCode, relationship, nuc.ac.nr,dnaconcngpmicl, absratio, absratiolow, rank.absratio=rank.absratio <- rank(absratiolow), dnayieldmicrog, dna.rank=dnayieldmicrog/(0.1+absratiolow), TissCode)] #DNA.rank provides a rank (higher number is more favorable) of our DNA stock solutions. For MW it is calculated differently. Here we do not favor the average yield but rather the hihgest yield.
DNA.rank.MW[dnayieldmicrog<=0, dna.rank:=0.0001]
DNA.rank.MW[order(-dna.rank)][seq(from=1, to=.N, by = 15 )]
#did dnayield from mw improveover time
ggplot(all.subj.byspec[TissCode=="MW"], aes(x=DNAextract.date, y=log2(dnayieldmicrog))) + geom_point() + stat_smooth(method = "lm")
summary(lm(log2(dnayieldmicrog)~DNAextract.date, data =all.subj.byspec[TissCode=="MW"]))
# library(BayesianFirstAid)
# fit2 <- bayes.t.test(log2(dnayieldmicrog)~DNAextract.date>as.IDate("2009-01-01"), data=all.subj.byspec[TissCode=="MW"])
# summary(fit2)
# plot(fit2)
t.test(log2(dnayieldmicrog)~DNAextract.date>as.IDate("2009-01-01"), data=all.subj.byspec[TissCode=="MW"])
ggplot(data=all.subj.byspec[TissCode=="MW"], aes(x=DNAextract.date>as.IDate("2009-01-01"), y=log2(dnayieldmicrog))) + geom_boxplot()
mw.stock <- subset(all.subj.byspec, TissCode=="MW", c(dnayieldmicrog,DNAextract.date) )
mw.stock[, extract.era:=ifelse(DNAextract.date>as.IDate("2009-01-01"), "since2009", "before2009")]
ggplot(data=mw.stock, aes(x=log2(dnayieldmicrog))) + geom_histogram(binwidth = 0.8) + facet_grid(extract.era~.) + aes(y = ..density..)
l.all.germ <- list(DNA.rank.WB, DNA.rank.MW)
DNA.rank <- rbindlist(l.all.germ,use.names=TRUE, fill=TRUE)
DNA.rank[, dna.rank:=dna.rank*(rank(TissCode)^2)]#dna.rank therefore accounts for best ratio and good yield and the type of tissue that it was extracted from where WB is better than mouthwash
DNA.rank[order(-dna.rank)][seq(from=1, to=.N, by = 25 )]
ggplot(all.subj.byspec[TissCode!="BUC"], aes(x=log10(dnayieldmicrog))) + geom_histogram(binwidth=0.25) + facet_grid(TissCode~.) + aes(y = ..density..) + ggtitle("Yield of DNA from each tissue type")
illumina.omni <- rpinfinwrk.dt[day2blue=="Y",nuc.ac.nr]# nuc.ac.nr of all specimens that we have illumina.omni data on
setkey(DNA.rank, PtCode)
setkey(express.rank, PtCode)
ordered.exome <- merge(express.rank, DNA.rank[relationship=="PT",.SD, .SDcols=-relationship])#do not want the DNA specimens from relations getting in here therefore filter for affected patients only
ordered.exome[,c("rankage", "rank.max.freq", "rank.annual.freq", "rank.distal", "rank.tracheost", "rank.surgcount", "absratiolow", "rank.absratio"):= NULL]
# get a field in for illumina genotyping
ordered.exome[, illumina:="noillumina"]
ordered.exome[nuc.ac.nr %in% illumina.omni, illumina:="haveillumina"]
ordered.exome[, rank.illumina:=rank(illumina)]#lower number is better so it should go in the denominator
# get a field in for trio vs duo vs solo
fam.dt <- all.subj.byspec[,list(father=any(relationship=="FA"), mother=any(relationship=="MO")),by=family]# fam.dt is a data.table to put the family status in one table
fam.dt[, PtCode:=paste0(family,"PT")]
fam.dt[, trio.duo.sol:=factor(1+father+mother, labels = c("solo", "duo", "trio"))]
setkey(fam.dt,PtCode)
ordered.exome[fam.dt, trio.duo.sol:= i.trio.duo.sol]# read about this at data.table join then add columns to existing data.frame without re-copy at http://stackoverflow.com/questions/19553005/data-table-join-then-add-columns-to-existing-data-frame-without-re-copy
# add a score for HPV type to multiply to numerator to find best affecteds to send for whole exome sequencing.
ordered.exome[,rank.hpv:=1]
ordered.exome[!is.na(hpv), rank.hpv:=2]
ordered.exome[hpv==6|hpv==11, rank.hpv:=4]
ordered.exome[,rank.exome := dna.rank^2*rank.express*as.numeric(trio.duo.sol)*rank.hpv/rank.illumina^2]
ordered.exome[is.na(rank.exome), rank.exome:=signif(quantile(ordered.exome[,rank.exome], na.rm = TRUE, probs = 0.1), digits = 2)]# some ranks are NA because there was just one piece of missing data. I therefore assigned all of them to have the 10th percentile rank.exome score
# Generate list of suitable samples-------------------------
exome.list <- ordered.exome[!nuc.ac.nr %in% nuc.ac.multiplex][sample(.N,22, replace=FALSE, prob=rank.exome)][order(-rank.exome)]#gives us random sample weighted for most suitable in descending order of suitability
nuc.ac.exome.list <- exome.list[,nuc.ac.nr]
#how about some adult onset since it may be a different disease
exome.ao <- ordered.exome[dxage>18][sample(.N,8, replace=FALSE, prob=(rank.exome))][order(-rank.exome)]
nuc.ac.exome.ao <- exome.ao[,nuc.ac.nr]
#how about some sets of parents from trios but not with mendelian errors
# consulted geneticist will not do trio parents at this time Wednesday, 15 Apr 2015 15:01
exome.trio <- exome.list[!PtCode %chin% paste0(mendel.error.fam,"PT")&trio.duo.sol=="trio"&TissCode=="WB"][sample(.N, 0, replace=FALSE, prob=rank.express),PtCode]
exome.trio <- str_sub(exome.trio,end=8L)#to convert the names from PtCode to trio name
exome.parents <- DNA.rank.WB[str_sub(PtCode, end=8L) %chin% exome.trio&relationship %chin% c("FA", "MO")]
DNA.rank.WB[relationship %chin% c("FA", "MO")]
nuc.ac.exome.parents <- exome.parents[,nuc.ac.nr]
#how about some children with mild disease
# instead of weighting by rank.exome, we will weight by rank.exome divided by square of expressivitiy
# set a limit that their age of diagnosis must be <12
exome.indolent <- ordered.exome[!nuc.ac.nr %in% nuc.ac.multiplex&dxage<12&aggr.max.freq<4&aggr.surgcount<10&aggr.distal=="cleardist"&aggr.tracheostomy=="Never"&!(is.na(aggr.surgcount)|is.na(aggr.max.freq)|is.na(avg.annual.frq)|is.na(aggr.distal)|is.na(aggr.tracheostomy)|is.na(dxage))][sample(.N,8, replace=FALSE, prob=rank.exome/rank.express^2)][order(-rank.exome)]#gives us random sample weighted for most suitable in descending order of suitability, Eliminated rank
summary(ordered.exome$rank.express)
nuc.ac.exome.indolent <- exome.indolent[,nuc.ac.nr]
nuc.ac.for.exome <- unique(c(nuc.ac.multiplex, nuc.ac.exome.list, nuc.ac.exome.ao, nuc.ac.exome.parents, nuc.ac.exome.indolent))
length(nuc.ac.for.exome)
#save(nuc.ac.for.exome, file = "nucleic acid whole exome.RData")
multip.n <- length(nuc.ac.multiplex)
high.penetrance.n <- length(nuc.ac.exome.list)
ao.n <- length(nuc.ac.exome.ao)
parents <- length(nuc.ac.exome.parents)
indolent <- length(nuc.ac.exome.indolent)
sum(multip.n, high.penetrance.n, ao.n, parents, indolent)
# find the samples for evan----------------
load(file="plating data minus 106.RData")
setkey(plating,nuc.ac.nr)
gofind <- plating[nuc.ac.nr %in% nuc.ac.for.exome,list(nuc.ac.nr,plate,well)][order(plate,well)]#samples in the big agena send out
gofind.106 <- setdiff(nuc.ac.for.exome, plating$nuc.ac.nr)#samples that were sent out in the initial 106
go.find.all.exome <- rbindlist(list(gofind, data.table(nuc.ac.nr=gofind.106)), fill=TRUE, use.names=TRUE)
write.csv(go.find.all.exome, file = "go find all exome.csv", row.names = FALSE)
# problem specimens notified 2015-04-22--------------
# http://gsl.hudsonalpha.org/projects/haib15FJB3120/view_project
problem.sampl <- data.table(hdslph=c(2, 1, 30, 17, 8, 6), nuc.ac.nr=c(760, 736, 600, 793, 759, 384), problem=c("low quantity", "low quantity", "degraded", "degraded", "low quantity", "degraded"), key="nuc.ac.nr")
problem.but.multiplex <- intersect(problem.sampl$nuc.ac.nr, nuc.ac.multiplex)
problem.sampl[J(problem.but.multiplex), resolve:="continue because valuable multiplex"]
setkey(ordered.exome,nuc.ac.nr)
ordered.exome[problem.sampl]# appears as if nuc.ac.nr 600 was selected because it came from a an affected person with a high expressivity of the disease and their absorbance ratio and yield of DNA was good notwithstanding that it was from a mouthwash specimen. Let us replace it
problem.sampl[J(c(600, 384)),resolve:="replace with another sample from high expressivity"]
load("nucleic acid whole exome.RData")# do this so we get the actual original specimens sent
exome.list <- ordered.exome[!nuc.ac.nr %in% c(nuc.ac.multiplex, nuc.ac.for.exome) ][sample(.N,1, replace=FALSE, prob=rank.exome)][order(-rank.exome)]#gives us random sample weighted for most suitable in descending order of suitability
#nuc.ac.exome.substitute <- c(exome.list[,nuc.ac.nr], nuc.ac.exome.substitute)
#save(nuc.ac.exome.substitute, file = "nucleic acid whole exome substitute.RData")
|
library(dplyr)
library(ggplot2)
png(filename = "/Users/vasudok/R Programming/Coursera R/EDAProject2/plot3.png")
ggplot(baltimore,aes(factor(year),Emissions)) +
geom_bar(stat="identity") +
facet_grid(.~type) +
labs(x = "Year", y = "PM2.5 Emission (Tons)") +
labs(title = "PM2.5 Emissions Per Year in Baltimore City By Type")
dev.off() | /plot3.R | no_license | vasudok/EDAProject2 | R | false | false | 346 | r | library(dplyr)
library(ggplot2)
png(filename = "/Users/vasudok/R Programming/Coursera R/EDAProject2/plot3.png")
ggplot(baltimore,aes(factor(year),Emissions)) +
geom_bar(stat="identity") +
facet_grid(.~type) +
labs(x = "Year", y = "PM2.5 Emission (Tons)") +
labs(title = "PM2.5 Emissions Per Year in Baltimore City By Type")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svm.functions.R
\name{svmrfeFeatureRanking}
\alias{svmrfeFeatureRanking}
\title{SVM Recursive Feature Extraction (Binary)}
\usage{
svmrfeFeatureRanking(x, y, c, perc.rem = 10)
}
\arguments{
\item{x}{A matrix where each column represents a feature and each row
represents a sample}
\item{y}{A vector of labels corresponding to each sample's group membership}
\item{c}{A numeric value corresponding to the 'cost' applied during the
svm model fitting. This can be selected by the user if using this
function directly or is done internally.}
\item{perc.rem}{A numeric value indicating the percent of features
removed during each iteration. Default \code{perc.rem = 10}.}
}
\value{
Vector of features ranked from most important to least important.
}
\description{
This conducts feature selection for Support Vector Machines
models via recursive feature extraction. This returns a vector of the
features in x ordered by relevance. The first item of the vector has the
index of the feature which is more relevant to perform the classification
and the last item of the vector has the feature which is less relevant.
This function is specific to Binary classification problems,
}
\examples{
dat.discr <- create.discr.matrix(
create.corr.matrix(
create.random.matrix(nvar = 50,
nsamp = 100,
st.dev = 1,
perturb = 0.2)),
D = 10
)
vars <- dat.discr$discr.mat
groups <- dat.discr$classes
# binary class feature ranking
svmrfeFeatureRanking(x = vars,
y = groups,
c = 0.1,
perc.rem = 10)
}
\references{
Guyon I. et. al. (2010) \emph{Gene Selection for Cancer
Classification using Support Vector Machines}. Machine Learning 46 389-422.
}
\seealso{
\code{\link{svmrfeFeatureRankingForMulticlass}}
}
| /man/svmrfeFeatureRanking.Rd | no_license | cdeterman/OmicsMarkeR | R | false | true | 1,946 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svm.functions.R
\name{svmrfeFeatureRanking}
\alias{svmrfeFeatureRanking}
\title{SVM Recursive Feature Extraction (Binary)}
\usage{
svmrfeFeatureRanking(x, y, c, perc.rem = 10)
}
\arguments{
\item{x}{A matrix where each column represents a feature and each row
represents a sample}
\item{y}{A vector of labels corresponding to each sample's group membership}
\item{c}{A numeric value corresponding to the 'cost' applied during the
svm model fitting. This can be selected by the user if using this
function directly or is done internally.}
\item{perc.rem}{A numeric value indicating the percent of features
removed during each iteration. Default \code{perc.rem = 10}.}
}
\value{
Vector of features ranked from most important to least important.
}
\description{
This conducts feature selection for Support Vector Machines
models via recursive feature extraction. This returns a vector of the
features in x ordered by relevance. The first item of the vector has the
index of the feature which is more relevant to perform the classification
and the last item of the vector has the feature which is less relevant.
This function is specific to Binary classification problems,
}
\examples{
dat.discr <- create.discr.matrix(
create.corr.matrix(
create.random.matrix(nvar = 50,
nsamp = 100,
st.dev = 1,
perturb = 0.2)),
D = 10
)
vars <- dat.discr$discr.mat
groups <- dat.discr$classes
# binary class feature ranking
svmrfeFeatureRanking(x = vars,
y = groups,
c = 0.1,
perc.rem = 10)
}
\references{
Guyon I. et. al. (2010) \emph{Gene Selection for Cancer
Classification using Support Vector Machines}. Machine Learning 46 389-422.
}
\seealso{
\code{\link{svmrfeFeatureRankingForMulticlass}}
}
|
library(tidyverse)
water <- read_tsv("../../data/water_cleaned.txt") %>%
mutate(has_value = if_else(is.na(value) == TRUE, 0, 1),
time_pretty = as.character(time_pretty)) %>%
group_by(metabolite) %>%
mutate(total_values = sum(has_value)) %>%
filter(total_values > 6)
p <- water %>%
mutate(has_value = if_else(is.na(value) == TRUE, 0, 1)) %>%
group_by(metabolite) %>%
mutate(total_values = sum(has_value)) %>%
filter(value < 100000 | is.na(value) == TRUE,
total_values > 6) %>%
ggplot(aes(x = time_pretty, y = value, color = metabolite, linetype = extraction, group = interaction(extraction, metabolite))) +
geom_path() +
facet_grid(machine ~location) +
theme(
legend.position = "bottom",
axis.text.x = element_text(angle = 45, hjust = 1)
) +
scale_color_viridis_d(name = "") +
labs(
title = "Observed concentrations of each metabolite over time, by location",
subtitle = "Values are averaged over extractions and machine runs. Metabolites limited to those with at least 7 non-missing values.",
y = "Concentration (ng/mL)",
x = "Time"
)
ggsave("observed-metabolite-time-series.png", p, dpi = 600)
| /reports/acs-presentation/observed-metabolite-time-series.R | no_license | mloop/uf-wastewater | R | false | false | 1,172 | r | library(tidyverse)
water <- read_tsv("../../data/water_cleaned.txt") %>%
mutate(has_value = if_else(is.na(value) == TRUE, 0, 1),
time_pretty = as.character(time_pretty)) %>%
group_by(metabolite) %>%
mutate(total_values = sum(has_value)) %>%
filter(total_values > 6)
p <- water %>%
mutate(has_value = if_else(is.na(value) == TRUE, 0, 1)) %>%
group_by(metabolite) %>%
mutate(total_values = sum(has_value)) %>%
filter(value < 100000 | is.na(value) == TRUE,
total_values > 6) %>%
ggplot(aes(x = time_pretty, y = value, color = metabolite, linetype = extraction, group = interaction(extraction, metabolite))) +
geom_path() +
facet_grid(machine ~location) +
theme(
legend.position = "bottom",
axis.text.x = element_text(angle = 45, hjust = 1)
) +
scale_color_viridis_d(name = "") +
labs(
title = "Observed concentrations of each metabolite over time, by location",
subtitle = "Values are averaged over extractions and machine runs. Metabolites limited to those with at least 7 non-missing values.",
y = "Concentration (ng/mL)",
x = "Time"
)
ggsave("observed-metabolite-time-series.png", p, dpi = 600)
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.13206735342666e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615774227-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 362 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.13206735342666e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xmu_make_top_twin_models.R
\name{xmu_make_TwinSuperModel}
\alias{xmu_make_TwinSuperModel}
\title{Helper to make a basic top, MZ, and DZ model.}
\usage{
xmu_make_TwinSuperModel(
name = "twin_super",
mzData,
dzData,
selDVs,
selCovs = NULL,
sep = NULL,
type = c("Auto", "FIML", "cov", "cor", "WLS", "DWLS", "ULS"),
allContinuousMethod = c("cumulants", "marginals"),
numObsMZ = NULL,
numObsDZ = NULL,
nSib = 2,
equateMeans = TRUE,
weightVar = NULL,
bVector = FALSE,
dropMissingDef = TRUE,
verbose = FALSE
)
}
\arguments{
\item{name}{for the supermodel}
\item{mzData}{Dataframe containing the MZ data}
\item{dzData}{Dataframe containing the DZ data}
\item{selDVs}{List of manifest base names (e.g. BMI, NOT 'BMI_T1') (OR, you don't set "sep", the full variable names)}
\item{selCovs}{List of covariate base names (e.g. age, NOT 'age_T1') (OR, you don't set "sep", the full variable names)}
\item{sep}{string used to expand selDVs into selVars, i.e., "_T" to expand BMI into BMI_T1 and BMI_T2 (optional but STRONGLY encouraged)}
\item{type}{One of 'Auto','FIML','cov', 'cor', 'WLS','DWLS', or 'ULS'. Auto tries to react to the incoming mxData type (raw/cov).}
\item{allContinuousMethod}{"cumulants" or "marginals". Used in all-continuous WLS data to determine if a means model needed.}
\item{numObsMZ}{Number of MZ observations contributing (for summary data only)}
\item{numObsDZ}{Number of DZ observations contributing (for summary data only)}
\item{nSib}{Number of members per family (default = 2)}
\item{equateMeans}{Whether to equate T1 and T2 means (default = TRUE).}
\item{weightVar}{If provided, a vector objective will be used to weight the data. (default = NULL).}
\item{bVector}{Whether to compute row-wise likelihoods (defaults to FALSE).}
\item{dropMissingDef}{Whether to automatically drop missing def var rows for the user (default = TRUE). You get a polite note.}
\item{verbose}{(default = FALSE)}
}
\value{
\itemize{
\item \code{\link[=mxModel]{mxModel()}}s for top, MZ and DZ.
}
}
\description{
\code{xmu_make_TwinSuperModel} makes basic twin model containing \code{top}, \code{MZ}, and \code{DZ} models. It intelligently handles thresholds for
ordinal data, and means model for covariates matrices in the twin models if needed.
It's the replacement for \code{xmu_assemble_twin_supermodel} approach.
}
\details{
\code{xmu_make_TwinSuperModel} is used in twin models (e.g.\code{\link[=umxCP]{umxCP()}}, \code{\link[=umxACE]{umxACE()}} and \code{\link[=umxACEv]{umxACEv()}} and will be added to the other models: \code{\link[=umxGxE]{umxGxE()}}, \code{\link[=umxIP]{umxIP()}},
simplifying code maintenance.
It takes \code{mzData} and \code{dzData}, a list of the \code{selDVs} to analyse and optional \code{selCovs} (as well as \code{sep} and \code{nSib}), along with other
relevant information such as whether the user wants to \code{equateMeans}.
It can also handle a \code{weightVar}.
If covariates are passed in these are included in the means model (via a call to \code{xmuTwinUpgradeMeansToCovariateModel}.
\strong{Modeling}
\strong{Matrices created}
\emph{top model}
For raw and WLS data, \code{top} contains a \code{expMeans} matrix (if needed). For summary data, the top model contains only a name.
For ordinal data, \code{top} gains \code{top.threshMat} (from a call to \code{\link[=umxThresholdMatrix]{umxThresholdMatrix()}}).
For covariates, top stores the \code{intercepts} matrix and a \code{betaDef} matrix. These are then used to make expMeans in \code{MZ} and \code{DZ}.
\emph{MZ and DZ models}
\code{MZ} and \code{DZ} contain the data, and an expectation referencing \code{top.expCovMZ} and \code{top.expMean}, and, \code{vector = bVector}.
For continuous raw data, MZ and DZ contain \code{\link[OpenMx:mxExpectationNormal]{OpenMx::mxExpectationNormal()}} and \code{\link[OpenMx:mxFitFunctionML]{OpenMx::mxFitFunctionML()}}.
For WLS these the fit function is switched to \code{\link[OpenMx:mxFitFunctionWLS]{OpenMx::mxFitFunctionWLS()}} with appropriate \code{type} and \code{allContinuousMethod}.
For binary, a constraint and algebras are included to constrain \code{Vtot} (A+C+E) to 1.
If a \code{weightVar} is detected, these columns are used to create a row-weighted MZ and DZ models.
If \code{equateMeans} is \code{TRUE}, then the Twin-2 vars in the mean matrix are equated by label with Twin-1.
Decent starts are guessed from the data.
\code{varStarts} is computed as \code{sqrt(variance)/3} of the DVs and \code{meanStarts} as the variable means.
For raw data, a check is made for ordered variables. For Binary variables, means are fixed at 0 and
total variance (A+C+E) is fixed at 1. For ordinal variables, the first 2 thresholds are fixed.
Where needed, e.g. continuous raw data, top adds a means matrix "expMean".
For ordinal data, top adds a \code{\link[=umxThresholdMatrix]{umxThresholdMatrix()}}.
If binary variables are present, matrices and a constraint to hold \code{A+C+E == 1} are added to top.
If a weight variable is offered up, an \code{mzWeightMatrix} will be added.
\strong{Data handling}
In terms of data handling, \code{xmu_make_TwinSuperModel} was primarily designed to take
data.frames and process these into mxData.
It can also, however, handle cov and mxData input.
It can process data into all the types supported by \code{mxData}.
Raw data input with a target of \code{cov} or \code{cor} type requires the \code{numObsMZ} and \code{numObsDZ} to be set.
Type "WLS", "DWLS", or "ULS", data remain raw, but are handled as WLS in the \code{\link[OpenMx:mxFitFunctionWLS]{OpenMx::mxFitFunctionWLS()}}.
Unused columns are dropped.
If you pass in raw data, you can't request type cov/cor yet. Will work on this if desired.
}
\examples{
# ==============
# = Continuous =
# ==============
library(umx)
data(twinData)
twinData = umx_scale(twinData, varsToScale= c('ht1','ht2'))
mzData = twinData[twinData$zygosity \%in\% "MZFF",]
dzData = twinData[twinData$zygosity \%in\% "DZFF",]
m1= xmu_make_TwinSuperModel(mzData=mzData, dzData=dzData, selDVs=c("wt","ht"), sep="", nSib=2)
names(m1) # "top" "MZ" "DZ"
class(m1$MZ$fitfunction)[[1]] == "MxFitFunctionML"
# ====================
# = With a covariate =
# ====================
m1= xmu_make_TwinSuperModel(mzData=mzData, dzData=dzData,
selDVs= "wt", selCovs= "age", sep="", nSib=2)
m1$top$intercept$labels
m1$MZ$expMean
# ===============
# = WLS example =
# ===============
m1=xmu_make_TwinSuperModel(mzData=mzData, dzData=dzData,selDVs=c("wt","ht"),sep="",type="WLS")
class(m1$MZ$fitfunction)[[1]] == "MxFitFunctionWLS"
m1$MZ$fitfunction$type =="WLS"
# Check default all-continuous method
m1$MZ$fitfunction$continuousType == "cumulants"
# Choose non-default type (DWLS)
m1= xmu_make_TwinSuperModel(mzData= mzData, dzData= dzData,
selDVs= c("wt","ht"), sep="", type="DWLS")
m1$MZ$fitfunction$type =="DWLS"
class(m1$MZ$fitfunction)[[1]] == "MxFitFunctionWLS"
# Switch WLS method
m1 = xmu_make_TwinSuperModel(mzData= mzData, dzData= dzData, selDVs= c("wt","ht"), sep= "",
type = "WLS", allContinuousMethod = "marginals")
m1$MZ$fitfunction$continuousType == "marginals"
class(m1$MZ$fitfunction)[[1]] == "MxFitFunctionWLS"
# ============================================
# = Bivariate continuous and ordinal example =
# ============================================
data(twinData)
selDVs = c("wt", "obese")
# Cut BMI column to form ordinal obesity variables
ordDVs = c("obese1", "obese2")
obesityLevels = c('normal', 'overweight', 'obese')
cutPoints = quantile(twinData[, "bmi1"], probs = c(.5, .2), na.rm = TRUE)
twinData$obese1 = cut(twinData$bmi1, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels)
twinData$obese2 = cut(twinData$bmi2, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels)
# Make the ordinal variables into mxFactors (ensure ordered is TRUE, and require levels)
twinData[, ordDVs] = umxFactor(twinData[, ordDVs])
mzData = twinData[twinData$zygosity \%in\% "MZFF",]
dzData = twinData[twinData$zygosity \%in\% "DZFF",]
m1 = xmu_make_TwinSuperModel(mzData= mzData, dzData= dzData, selDVs= selDVs, sep="", nSib= 2)
names(m1) # "top" "MZ" "DZ"
# ==============
# = One binary =
# ==============
data(twinData)
cutPoints = quantile(twinData[, "bmi1"], probs = .2, na.rm = TRUE)
obesityLevels = c('normal', 'obese')
twinData$obese1 = cut(twinData$bmi1, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels)
twinData$obese2 = cut(twinData$bmi2, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels)
ordDVs = c("obese1", "obese2")
twinData[, ordDVs] = umxFactor(twinData[, ordDVs])
selDVs = c("wt", "obese")
mzData = twinData[twinData$zygosity \%in\% "MZFF",]
dzData = twinData[twinData$zygosity \%in\% "DZFF",]
m1 = xmu_make_TwinSuperModel(mzData= mzData, dzData= dzData, selDVs= selDVs, sep= "", nSib= 2)
# ========================================
# = Cov data (calls xmuTwinSuper_CovCor) =
# ========================================
data(twinData)
mzData =cov(twinData[twinData$zygosity \%in\% "MZFF", tvars(c("wt","ht"), sep="")], use="complete")
dzData =cov(twinData[twinData$zygosity \%in\% "DZFF", tvars(c("wt","ht"), sep="")], use="complete")
m1 = xmu_make_TwinSuperModel(mzData= mzData, dzData= dzData, selDVs= "wt", sep= "",
nSib= 2, numObsMZ = 100, numObsDZ = 100, verbose=TRUE)
class(m1$MZ$fitfunction)[[1]] =="MxFitFunctionML"
dimnames(m1$MZ$data$observed)[[1]]==c("wt1", "wt2")
}
\seealso{
Other xmu internal not for end user:
\code{\link{umxModel}()},
\code{\link{umxRenameMatrix}()},
\code{\link{umx_APA_pval}()},
\code{\link{umx_fun_mean_sd}()},
\code{\link{umx_get_bracket_addresses}()},
\code{\link{umx_make}()},
\code{\link{umx_standardize}()},
\code{\link{umx_string_to_algebra}()},
\code{\link{xmuHasSquareBrackets}()},
\code{\link{xmuLabel_MATRIX_Model}()},
\code{\link{xmuLabel_Matrix}()},
\code{\link{xmuLabel_RAM_Model}()},
\code{\link{xmuMI}()},
\code{\link{xmuMakeDeviationThresholdsMatrices}()},
\code{\link{xmuMakeOneHeadedPathsFromPathList}()},
\code{\link{xmuMakeTwoHeadedPathsFromPathList}()},
\code{\link{xmuMaxLevels}()},
\code{\link{xmuMinLevels}()},
\code{\link{xmuPropagateLabels}()},
\code{\link{xmuRAM2Ordinal}()},
\code{\link{xmuTwinSuper_Continuous}()},
\code{\link{xmuTwinSuper_NoBinary}()},
\code{\link{xmuTwinUpgradeMeansToCovariateModel}()},
\code{\link{xmu_CI_merge}()},
\code{\link{xmu_CI_stash}()},
\code{\link{xmu_DF_to_mxData_TypeCov}()},
\code{\link{xmu_PadAndPruneForDefVars}()},
\code{\link{xmu_bracket_address2rclabel}()},
\code{\link{xmu_cell_is_on}()},
\code{\link{xmu_check_levels_identical}()},
\code{\link{xmu_check_needs_means}()},
\code{\link{xmu_check_variance}()},
\code{\link{xmu_clean_label}()},
\code{\link{xmu_data_missing}()},
\code{\link{xmu_data_swap_a_block}()},
\code{\link{xmu_describe_data_WLS}()},
\code{\link{xmu_dot_make_paths}()},
\code{\link{xmu_dot_make_residuals}()},
\code{\link{xmu_dot_maker}()},
\code{\link{xmu_dot_move_ranks}()},
\code{\link{xmu_dot_rank_str}()},
\code{\link{xmu_extract_column}()},
\code{\link{xmu_get_CI}()},
\code{\link{xmu_lavaan_process_group}()},
\code{\link{xmu_make_bin_cont_pair_data}()},
\code{\link{xmu_make_mxData}()},
\code{\link{xmu_match.arg}()},
\code{\link{xmu_name_from_lavaan_str}()},
\code{\link{xmu_path2twin}()},
\code{\link{xmu_path_regex}()},
\code{\link{xmu_print_algebras}()},
\code{\link{xmu_rclabel_2_bracket_address}()},
\code{\link{xmu_safe_run_summary}()},
\code{\link{xmu_set_sep_from_suffix}()},
\code{\link{xmu_show_fit_or_comparison}()},
\code{\link{xmu_simplex_corner}()},
\code{\link{xmu_standardize_ACEcov}()},
\code{\link{xmu_standardize_ACEv}()},
\code{\link{xmu_standardize_ACE}()},
\code{\link{xmu_standardize_CP}()},
\code{\link{xmu_standardize_IP}()},
\code{\link{xmu_standardize_RAM}()},
\code{\link{xmu_standardize_SexLim}()},
\code{\link{xmu_standardize_Simplex}()},
\code{\link{xmu_start_value_list}()},
\code{\link{xmu_starts}()},
\code{\link{xmu_summary_RAM_group_parameters}()},
\code{\link{xmu_twin_add_WeightMatrices}()},
\code{\link{xmu_twin_check}()},
\code{\link{xmu_twin_get_var_names}()},
\code{\link{xmu_twin_make_def_means_mats_and_alg}()},
\code{\link{xmu_twin_upgrade_selDvs2SelVars}()}
}
\concept{xmu internal not for end user}
| /man/xmu_make_TwinSuperModel.Rd | no_license | tbates/umx | R | false | true | 12,323 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xmu_make_top_twin_models.R
\name{xmu_make_TwinSuperModel}
\alias{xmu_make_TwinSuperModel}
\title{Helper to make a basic top, MZ, and DZ model.}
\usage{
xmu_make_TwinSuperModel(
name = "twin_super",
mzData,
dzData,
selDVs,
selCovs = NULL,
sep = NULL,
type = c("Auto", "FIML", "cov", "cor", "WLS", "DWLS", "ULS"),
allContinuousMethod = c("cumulants", "marginals"),
numObsMZ = NULL,
numObsDZ = NULL,
nSib = 2,
equateMeans = TRUE,
weightVar = NULL,
bVector = FALSE,
dropMissingDef = TRUE,
verbose = FALSE
)
}
\arguments{
\item{name}{for the supermodel}
\item{mzData}{Dataframe containing the MZ data}
\item{dzData}{Dataframe containing the DZ data}
\item{selDVs}{List of manifest base names (e.g. BMI, NOT 'BMI_T1') (OR, you don't set "sep", the full variable names)}
\item{selCovs}{List of covariate base names (e.g. age, NOT 'age_T1') (OR, you don't set "sep", the full variable names)}
\item{sep}{string used to expand selDVs into selVars, i.e., "_T" to expand BMI into BMI_T1 and BMI_T2 (optional but STRONGLY encouraged)}
\item{type}{One of 'Auto','FIML','cov', 'cor', 'WLS','DWLS', or 'ULS'. Auto tries to react to the incoming mxData type (raw/cov).}
\item{allContinuousMethod}{"cumulants" or "marginals". Used in all-continuous WLS data to determine if a means model needed.}
\item{numObsMZ}{Number of MZ observations contributing (for summary data only)}
\item{numObsDZ}{Number of DZ observations contributing (for summary data only)}
\item{nSib}{Number of members per family (default = 2)}
\item{equateMeans}{Whether to equate T1 and T2 means (default = TRUE).}
\item{weightVar}{If provided, a vector objective will be used to weight the data. (default = NULL).}
\item{bVector}{Whether to compute row-wise likelihoods (defaults to FALSE).}
\item{dropMissingDef}{Whether to automatically drop missing def var rows for the user (default = TRUE). You get a polite note.}
\item{verbose}{(default = FALSE)}
}
\value{
\itemize{
\item \code{\link[=mxModel]{mxModel()}}s for top, MZ and DZ.
}
}
\description{
\code{xmu_make_TwinSuperModel} makes basic twin model containing \code{top}, \code{MZ}, and \code{DZ} models. It intelligently handles thresholds for
ordinal data, and means model for covariates matrices in the twin models if needed.
It's the replacement for \code{xmu_assemble_twin_supermodel} approach.
}
\details{
\code{xmu_make_TwinSuperModel} is used in twin models (e.g.\code{\link[=umxCP]{umxCP()}}, \code{\link[=umxACE]{umxACE()}} and \code{\link[=umxACEv]{umxACEv()}} and will be added to the other models: \code{\link[=umxGxE]{umxGxE()}}, \code{\link[=umxIP]{umxIP()}},
simplifying code maintenance.
It takes \code{mzData} and \code{dzData}, a list of the \code{selDVs} to analyse and optional \code{selCovs} (as well as \code{sep} and \code{nSib}), along with other
relevant information such as whether the user wants to \code{equateMeans}.
It can also handle a \code{weightVar}.
If covariates are passed in these are included in the means model (via a call to \code{xmuTwinUpgradeMeansToCovariateModel}.
\strong{Modeling}
\strong{Matrices created}
\emph{top model}
For raw and WLS data, \code{top} contains a \code{expMeans} matrix (if needed). For summary data, the top model contains only a name.
For ordinal data, \code{top} gains \code{top.threshMat} (from a call to \code{\link[=umxThresholdMatrix]{umxThresholdMatrix()}}).
For covariates, top stores the \code{intercepts} matrix and a \code{betaDef} matrix. These are then used to make expMeans in \code{MZ} and \code{DZ}.
\emph{MZ and DZ models}
\code{MZ} and \code{DZ} contain the data, and an expectation referencing \code{top.expCovMZ} and \code{top.expMean}, and, \code{vector = bVector}.
For continuous raw data, MZ and DZ contain \code{\link[OpenMx:mxExpectationNormal]{OpenMx::mxExpectationNormal()}} and \code{\link[OpenMx:mxFitFunctionML]{OpenMx::mxFitFunctionML()}}.
For WLS these the fit function is switched to \code{\link[OpenMx:mxFitFunctionWLS]{OpenMx::mxFitFunctionWLS()}} with appropriate \code{type} and \code{allContinuousMethod}.
For binary, a constraint and algebras are included to constrain \code{Vtot} (A+C+E) to 1.
If a \code{weightVar} is detected, these columns are used to create a row-weighted MZ and DZ models.
If \code{equateMeans} is \code{TRUE}, then the Twin-2 vars in the mean matrix are equated by label with Twin-1.
Decent starts are guessed from the data.
\code{varStarts} is computed as \code{sqrt(variance)/3} of the DVs and \code{meanStarts} as the variable means.
For raw data, a check is made for ordered variables. For Binary variables, means are fixed at 0 and
total variance (A+C+E) is fixed at 1. For ordinal variables, the first 2 thresholds are fixed.
Where needed, e.g. continuous raw data, top adds a means matrix "expMean".
For ordinal data, top adds a \code{\link[=umxThresholdMatrix]{umxThresholdMatrix()}}.
If binary variables are present, matrices and a constraint to hold \code{A+C+E == 1} are added to top.
If a weight variable is offered up, an \code{mzWeightMatrix} will be added.
\strong{Data handling}
In terms of data handling, \code{xmu_make_TwinSuperModel} was primarily designed to take
data.frames and process these into mxData.
It can also, however, handle cov and mxData input.
It can process data into all the types supported by \code{mxData}.
Raw data input with a target of \code{cov} or \code{cor} type requires the \code{numObsMZ} and \code{numObsDZ} to be set.
For types "WLS", "DWLS", or "ULS", the data remain raw but are handled as weighted least squares by \code{\link[OpenMx:mxFitFunctionWLS]{OpenMx::mxFitFunctionWLS()}}.
Unused columns are dropped.
If you pass in raw data, you can't request type cov/cor yet. Will work on this if desired.
}
\examples{
# ==============
# = Continuous =
# ==============
library(umx)
data(twinData)
twinData = umx_scale(twinData, varsToScale= c('ht1','ht2'))
mzData = twinData[twinData$zygosity \%in\% "MZFF",]
dzData = twinData[twinData$zygosity \%in\% "DZFF",]
m1= xmu_make_TwinSuperModel(mzData=mzData, dzData=dzData, selDVs=c("wt","ht"), sep="", nSib=2)
names(m1) # "top" "MZ" "DZ"
class(m1$MZ$fitfunction)[[1]] == "MxFitFunctionML"
# ====================
# = With a covariate =
# ====================
m1= xmu_make_TwinSuperModel(mzData=mzData, dzData=dzData,
selDVs= "wt", selCovs= "age", sep="", nSib=2)
m1$top$intercept$labels
m1$MZ$expMean
# ===============
# = WLS example =
# ===============
m1=xmu_make_TwinSuperModel(mzData=mzData, dzData=dzData,selDVs=c("wt","ht"),sep="",type="WLS")
class(m1$MZ$fitfunction)[[1]] == "MxFitFunctionWLS"
m1$MZ$fitfunction$type =="WLS"
# Check default all-continuous method
m1$MZ$fitfunction$continuousType == "cumulants"
# Choose non-default type (DWLS)
m1= xmu_make_TwinSuperModel(mzData= mzData, dzData= dzData,
selDVs= c("wt","ht"), sep="", type="DWLS")
m1$MZ$fitfunction$type =="DWLS"
class(m1$MZ$fitfunction)[[1]] == "MxFitFunctionWLS"
# Switch WLS method
m1 = xmu_make_TwinSuperModel(mzData= mzData, dzData= dzData, selDVs= c("wt","ht"), sep= "",
type = "WLS", allContinuousMethod = "marginals")
m1$MZ$fitfunction$continuousType == "marginals"
class(m1$MZ$fitfunction)[[1]] == "MxFitFunctionWLS"
# ============================================
# = Bivariate continuous and ordinal example =
# ============================================
data(twinData)
selDVs = c("wt", "obese")
# Cut BMI column to form ordinal obesity variables
ordDVs = c("obese1", "obese2")
obesityLevels = c('normal', 'overweight', 'obese')
cutPoints = quantile(twinData[, "bmi1"], probs = c(.5, .2), na.rm = TRUE)
twinData$obese1 = cut(twinData$bmi1, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels)
twinData$obese2 = cut(twinData$bmi2, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels)
# Make the ordinal variables into mxFactors (ensure ordered is TRUE, and require levels)
twinData[, ordDVs] = umxFactor(twinData[, ordDVs])
mzData = twinData[twinData$zygosity \%in\% "MZFF",]
dzData = twinData[twinData$zygosity \%in\% "DZFF",]
m1 = xmu_make_TwinSuperModel(mzData= mzData, dzData= dzData, selDVs= selDVs, sep="", nSib= 2)
names(m1) # "top" "MZ" "DZ"
# ==============
# = One binary =
# ==============
data(twinData)
cutPoints = quantile(twinData[, "bmi1"], probs = .2, na.rm = TRUE)
obesityLevels = c('normal', 'obese')
twinData$obese1 = cut(twinData$bmi1, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels)
twinData$obese2 = cut(twinData$bmi2, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels)
ordDVs = c("obese1", "obese2")
twinData[, ordDVs] = umxFactor(twinData[, ordDVs])
selDVs = c("wt", "obese")
mzData = twinData[twinData$zygosity \%in\% "MZFF",]
dzData = twinData[twinData$zygosity \%in\% "DZFF",]
m1 = xmu_make_TwinSuperModel(mzData= mzData, dzData= dzData, selDVs= selDVs, sep= "", nSib= 2)
# ========================================
# = Cov data (calls xmuTwinSuper_CovCor) =
# ========================================
data(twinData)
mzData =cov(twinData[twinData$zygosity \%in\% "MZFF", tvars(c("wt","ht"), sep="")], use="complete")
dzData =cov(twinData[twinData$zygosity \%in\% "DZFF", tvars(c("wt","ht"), sep="")], use="complete")
m1 = xmu_make_TwinSuperModel(mzData= mzData, dzData= dzData, selDVs= "wt", sep= "",
nSib= 2, numObsMZ = 100, numObsDZ = 100, verbose=TRUE)
class(m1$MZ$fitfunction)[[1]] =="MxFitFunctionML"
dimnames(m1$MZ$data$observed)[[1]]==c("wt1", "wt2")
}
\seealso{
Other xmu internal not for end user:
\code{\link{umxModel}()},
\code{\link{umxRenameMatrix}()},
\code{\link{umx_APA_pval}()},
\code{\link{umx_fun_mean_sd}()},
\code{\link{umx_get_bracket_addresses}()},
\code{\link{umx_make}()},
\code{\link{umx_standardize}()},
\code{\link{umx_string_to_algebra}()},
\code{\link{xmuHasSquareBrackets}()},
\code{\link{xmuLabel_MATRIX_Model}()},
\code{\link{xmuLabel_Matrix}()},
\code{\link{xmuLabel_RAM_Model}()},
\code{\link{xmuMI}()},
\code{\link{xmuMakeDeviationThresholdsMatrices}()},
\code{\link{xmuMakeOneHeadedPathsFromPathList}()},
\code{\link{xmuMakeTwoHeadedPathsFromPathList}()},
\code{\link{xmuMaxLevels}()},
\code{\link{xmuMinLevels}()},
\code{\link{xmuPropagateLabels}()},
\code{\link{xmuRAM2Ordinal}()},
\code{\link{xmuTwinSuper_Continuous}()},
\code{\link{xmuTwinSuper_NoBinary}()},
\code{\link{xmuTwinUpgradeMeansToCovariateModel}()},
\code{\link{xmu_CI_merge}()},
\code{\link{xmu_CI_stash}()},
\code{\link{xmu_DF_to_mxData_TypeCov}()},
\code{\link{xmu_PadAndPruneForDefVars}()},
\code{\link{xmu_bracket_address2rclabel}()},
\code{\link{xmu_cell_is_on}()},
\code{\link{xmu_check_levels_identical}()},
\code{\link{xmu_check_needs_means}()},
\code{\link{xmu_check_variance}()},
\code{\link{xmu_clean_label}()},
\code{\link{xmu_data_missing}()},
\code{\link{xmu_data_swap_a_block}()},
\code{\link{xmu_describe_data_WLS}()},
\code{\link{xmu_dot_make_paths}()},
\code{\link{xmu_dot_make_residuals}()},
\code{\link{xmu_dot_maker}()},
\code{\link{xmu_dot_move_ranks}()},
\code{\link{xmu_dot_rank_str}()},
\code{\link{xmu_extract_column}()},
\code{\link{xmu_get_CI}()},
\code{\link{xmu_lavaan_process_group}()},
\code{\link{xmu_make_bin_cont_pair_data}()},
\code{\link{xmu_make_mxData}()},
\code{\link{xmu_match.arg}()},
\code{\link{xmu_name_from_lavaan_str}()},
\code{\link{xmu_path2twin}()},
\code{\link{xmu_path_regex}()},
\code{\link{xmu_print_algebras}()},
\code{\link{xmu_rclabel_2_bracket_address}()},
\code{\link{xmu_safe_run_summary}()},
\code{\link{xmu_set_sep_from_suffix}()},
\code{\link{xmu_show_fit_or_comparison}()},
\code{\link{xmu_simplex_corner}()},
\code{\link{xmu_standardize_ACEcov}()},
\code{\link{xmu_standardize_ACEv}()},
\code{\link{xmu_standardize_ACE}()},
\code{\link{xmu_standardize_CP}()},
\code{\link{xmu_standardize_IP}()},
\code{\link{xmu_standardize_RAM}()},
\code{\link{xmu_standardize_SexLim}()},
\code{\link{xmu_standardize_Simplex}()},
\code{\link{xmu_start_value_list}()},
\code{\link{xmu_starts}()},
\code{\link{xmu_summary_RAM_group_parameters}()},
\code{\link{xmu_twin_add_WeightMatrices}()},
\code{\link{xmu_twin_check}()},
\code{\link{xmu_twin_get_var_names}()},
\code{\link{xmu_twin_make_def_means_mats_and_alg}()},
\code{\link{xmu_twin_upgrade_selDvs2SelVars}()}
}
\concept{xmu internal not for end user}
|
#' Build one Gene-Gene Eigen-Epistasis (GGEE) interaction variable per gene pair
#'
#' For every pair of genes, the SNP-by-SNP interaction matrix W is computed by
#' the compiled helper \code{IntProd}, standardized, and projected onto the
#' unit vector proportional to the cross-covariance of W with the phenotype Y,
#' yielding a single interaction component per gene pair.
#'
#' @param X Genotype matrix (individuals x SNPs); columns named by SNP.
#' @param Y Phenotype vector of length \code{nrow(X)}.
#' @param listGenesSNP Named list mapping each gene to its SNP column names.
#' @return A list with \code{Int}, a standardized matrix holding one
#'   interaction column per gene pair, and \code{interLength}, a named vector
#'   of 1s (number of components per pair).
#' @useDynLib GGEE
#' @importFrom Rcpp sourceCpp
GGEE <- function(X, Y, listGenesSNP) {
  genes <- names(listGenesSNP)
  # Keep every SNP of each gene (portionSNP = 1, no explicit causal count).
  NamesSNP <- sapply(genes, function(x) SNPinGene(x, 1, nbSNPcausaux = NULL, listGenes = listGenesSNP))
  if (is.matrix(NamesSNP)) {
    NamesSNP <- as.list(data.frame(NamesSNP))
  }
  # Seed column of NAs so cbind() works inside the loop; dropped afterwards.
  Z <- matrix(, nrow = dim(X)[1])
  for (i in 1:(length(genes) - 1)) {
    X1 <- as.matrix(X[, as.character(NamesSNP[[i]])])
    for (k in (i + 1):length(genes)) {
      X2 <- as.matrix(X[, as.character(NamesSNP[[k]])])
      # Progress report: use message() (stderr condition) rather than print().
      message(paste("X", genes[i], genes[k], sep = "."))
      resProd <- IntProd(X1, X2, i, k)
      W <- as.matrix(resProd$W)
      colnames(W) <- resProd$names
      W <- scale(W, center = TRUE, scale = TRUE)
      A <- as.vector(t(W) %*% Y)
      # BUG FIX: the previous `A / sqrt(A %*% A)` fails whenever length(A) > 1,
      # because a plain vector cannot be divided by the 1x1 matrix returned by
      # %*% ("dims do not match the length of object"). Use the scalar norm.
      u <- A / sqrt(sum(A^2))
      z <- W %*% u
      colnames(z) <- paste("X", genes[i], genes[k], sep = ".")
      Z <- cbind(Z, z)
    }
  }
  Z <- as.matrix(Z[, -1])  # drop the NA seed column
  Z <- scale(Z, center = TRUE, scale = TRUE)
  interLength <- rep(1, dim(Z)[2])
  names(interLength) <- colnames(Z)
  return(list(Int = Z, interLength = interLength))
}
# Return the "causal" SNP names of one gene: the first nbSNPcausaux SNPs, or
# the first (portionSNP * number of SNPs) of them when nbSNPcausaux is NULL.
# A gene carrying a single SNP is returned as-is.
#
# Args:
#   gene: gene name (key into listGenes).
#   portionSNP: fraction of the gene's SNPs to keep (used only when
#     nbSNPcausaux is NULL).
#   nbSNPcausaux: explicit number of SNPs to keep, or NULL.
#   listGenes: named list mapping gene names to SNP name vectors.
# Returns: character vector of the selected SNP names.
SNPinGene <- function(gene, portionSNP, nbSNPcausaux, listGenes) {
  snps <- listGenes[[gene]]
  if (length(snps) == 1) {
    return(snps)
  }
  nCausal <- if (is.null(nbSNPcausaux)) length(snps) * portionSNP else nbSNPcausaux
  snps[1:nCausal]
}
# Build interaction variables between two groups of variables (two genes)
# via canonical correlation analysis (CCA, stats::cancor).
#
# For each of the first min(nbcomp1, nbcomp2) canonical dimensions, the
# interaction variable is the elementwise product of the two canonical
# variates Z1 = X1 a1 and Z2 = X2 a2.
#
# Args:
#   X1, X2: numeric matrices (individuals x variables) or plain vectors
#           (single-variable group).
#   nbcomp: requested number of canonical components; capped below by the
#           number of coefficient columns cancor() actually returns.
#   nameX1, nameX2: group (gene) names used to label the output columns.
# Returns:
#   list(Int = matrix/data.frame of interaction columns,
#        nbVar = named count of those columns).
between <- function(X1, X2, nbcomp, nameX1, nameX2) {
nbcomp1 = nbcomp
nbcomp2 = nbcomp
nbrow = length(X1)
# Case 1: both groups are matrices. A first cancor() pass identifies
# variables that receive no coefficient (rank deficiency) so they can be
# removed before the final decomposition.
if (is.vector(X1) == FALSE & is.vector(X2) == FALSE) {
nbrow = dim(X1)[1]
colnames(X1) <- c(1:dim(X1)[2])
colnames(X2) <- c(1:dim(X2)[2])
cca <- cancor(X1, X2)
a1 <- cca$xcoef[, 1]
a2 <- cca$ycoef[, 1]
if (length(a1) < dim(X1)[2]) {
n1 <- which( !(1:dim(X1)[2] %in% as.numeric(names(a1))))
X1 <- X1[, -n1]
# drop X1 variables that received no coefficient in a1
}
if (length(a2) < dim(X2)[2]) {
n2 <- which(!(1:dim(X2)[2] %in% as.numeric(names(a2))))
X2 <- X2[, -n2]
}
}
# Case 2: X1 is a single variable, X2 a matrix; only X2 may need pruning.
if (is.vector(X1) == TRUE & is.vector(X2) == FALSE) {
nbrow = length(X1)
colnames(X2) <- c(1:dim(X2)[2])
cca <- cancor(X1, X2)
a1 <- cca$xcoef[, 1]
a2 <- cca$ycoef[, 1]
if (length(a2) < dim(X2)[2]) {
n2 <- which(!(1:dim(X2)[2] %in% as.numeric(names(a2))))
X2 <- X2[, -n2]
}
}
# Case 3: X1 is a matrix, X2 a single variable; only X1 may need pruning.
if (is.vector(X1) == FALSE & is.vector(X2) == TRUE) {
nbrow = dim(X1)[1]
colnames(X1) <- c(1:dim(X1)[2])
cca <- cancor(X1, X2)
a1 <- cca$xcoef[, 1]
a2 <- cca$ycoef[, 1]
if (length(a1) < dim(X1)[2]) {
n1 <- which(!(1:dim(X1)[2] %in% as.numeric(names(a1))))
X1 <- X1[, -n1]
}
}
# Final CCA on the (possibly reduced) groups; cap the requested component
# counts at what the decomposition can actually provide.
cca2 <- cancor(X1, X2)
if (dim(cca2$xcoef)[2] < nbcomp1) {
nbcomp1 = dim(cca2$xcoef)[2]
}
if (dim(cca2$ycoef)[2] < nbcomp2) {
nbcomp2 = dim(cca2$ycoef)[2]
}
A1 <- cca2$xcoef[, 1:nbcomp1]
A2 <- cca2$ycoef[, 1:nbcomp2]
# Placeholder column so cbind() works inside the loop; dropped afterwards.
I <- data.frame(matrix(1, nrow = nbrow))
for (i in seq(min(nbcomp1, nbcomp2))) {
a1 <- as.matrix(A1)[, i]
a2 <- as.matrix(A2)[, i]
# Canonical variates for dimension i of each group.
Z1 <- t(a1 %*% t(X1))
Z2 <- t(a2 %*% t(X2))
# Interaction = elementwise product of the two variates.
int <- Z1 * Z2
int <- data.frame(int)
names(int) <- paste(nameX1, nameX2, i, i, sep = ".")
I <- cbind(I, int)
}
I <- I[, -1]
# A single interaction column collapses to a vector; restore matrix shape.
if (is.vector(I)) {
I <- as.matrix(I)
colnames(I) <- names(int)
}
# nbVar <- nbcomp
nbVar <- dim(I)[2]
names(nbVar) <- paste("X", nameX1, nameX2, sep = ".")
return(list(Int = I, nbVar = nbVar))
}
# Compute between() interactions for every pair of variable groups in X.
#
# Args:
#   X: data matrix whose columns belong to groups (genes).
#   G: vector of group labels, one per column of X.
#   nbcomp: number of canonical components per pair (NULL defaults to 1).
# Returns:
#   list(XBet = data.frame of interaction variables,
#        ident = two-column data.frame of group index pairs,
#        interLength = named vector of column counts per pair,
#        nbVar = column count of the LAST pair only (historical behavior)).
between.mat <- function(X, G, nbcomp) {
  if (is.null(nbcomp)) {
    nbcomp <- 1
  }
  genes <- unique(G)
  nbGroup <- length(levels(as.factor(G)))
  # Placeholder column/row so cbind/rbind work inside the loop; both dropped below.
  B <- data.frame(matrix(1, nrow = dim(X)[1]))
  d <- data.frame(matrix(1, ncol = 2))
  interLength <- c()
  for (i in 1:(nbGroup - 1)) {
    for (k in (i + 1):nbGroup) {
      betw <- between(X[, G == genes[i]], X[, G == genes[k]],
                      nbcomp, nameX1 = genes[i], nameX2 = genes[k])
      B <- data.frame(B, betw$Int)
      d <- rbind(d, c(i, k))
      nbVar <- betw$nbVar
      interLength <- c(interLength, nbVar)
    }
  }
  keptNames <- colnames(B)[-1]
  B <- as.data.frame(B[, -1])
  colnames(B) <- keptNames
  d <- d[-1, ]
  # NOTE: nbVar reflects only the last gene pair, as in the original code.
  return(list(XBet = B, ident = d, interLength = interLength, nbVar = nbVar))
}
# Build per-gene principal components and all pairwise component interactions.
#
# Each gene's SNP block is reduced by PCA (FactoMineR::PCA) to at most
# `nbcomp` components (a single-SNP gene is kept as-is). Interaction
# variables are the elementwise products of every component pair across
# distinct genes.
#
# Args:
#   X: genotype matrix (individuals x SNPs), columns named by SNP.
#   listGenesSNP: named list mapping each gene to its SNP column names.
#   nbcomp: maximum number of principal components kept per gene.
# Returns:
#   list(XacpwInt = matrix of components plus interactions,
#        Int = interaction columns only,
#        interLength = named vector of interaction counts per gene pair,
#        ResACP = per-gene PCA eigenvalue tables).
PCAGenes <- function(X, listGenesSNP, nbcomp) {
nbGenes <- length(listGenesSNP)
namesGenes <- names(listGenesSNP)
Xacp <- c()
allnbComp <- c()
ResACPvar <- list()
# Step 1: reduce each gene's SNP block to its leading principal components.
for (i in seq(namesGenes)) {
red <- choixGenes(genes = namesGenes[i], X, listGenesSNP = listGenesSNP)
Xred <- red$Xred
if(dim(Xred)[2]== 1){
# Single-SNP gene: keep the raw column, labelled as component 1.
newVar <- Xred
colnames(newVar) <- paste(namesGenes[i], 1, sep =".")
allnbComp <-c(allnbComp,1)
}else{
ResACP <- FactoMineR::PCA(Xred, scale.unit = TRUE, ncp = dim(Xred)[2], graph = F)
eig <- list(ResACP$eig)
names(eig) <- namesGenes[i]
ResACPvar <- c(ResACPvar, eig)
# Cap the component count at the number of available SNP columns.
if (dim(Xred)[2]< nbcomp){
nbComp <- dim(Xred)[2]
}else{
nbComp <- nbcomp
}
allnbComp <- c(allnbComp, nbComp)
newVar <- as.matrix(ResACP$ind$coord[, c(1:nbComp)])
namesVar <- c()
for (j in seq(nbComp)) {
namesVar <- c(namesVar, paste(namesGenes[i], j, sep = "."))
}
colnames(newVar) <- namesVar
}
Xacp <- cbind(Xacp, newVar)
}
# Gene index label for every retained component column.
G <- rep(seq(nbGenes), allnbComp)
# Placeholder column/row so cbind/rbind work in the loop; dropped below.
I <- data.frame(matrix(1, nrow = dim(Xacp)[1]))
interLength <- c()
d <- data.frame(matrix(1, ncol = 2))
# NOTE(review): `c` is unused and shadows base::c within this scope; kept as-is.
c <- c()
# Step 2: elementwise products of every component pair across gene pairs.
for (g1 in 1:(nbGenes - 1)) {
for (g2 in (g1 + 1):nbGenes) {
nbVar1 <- allnbComp[g1]
nbVar2 <- allnbComp[g2]
for (i in seq(nbVar1)) {
for (k in seq(nbVar2)) {
X1 <- as.matrix(Xacp[, G == g1])[, i]
X2 <- as.matrix(Xacp[, G == g2])[, k]
Y <- X1 * X2
Y <- data.frame(Y)
colnames(Y) <- paste(namesGenes[g1], namesGenes[g2], i, k, sep = ".")
I <- cbind(I, Y)
}
}
nbVar <- nbVar1 * nbVar2
names(nbVar) <- paste("X", namesGenes[g1], namesGenes[g2], sep = ".")
interLength <- c(interLength, nbVar)
d <- rbind(d, c(g1, g2))
}
}
I <- I[, -1]
d <- d[-1, ]
# A single interaction column collapses to a vector; restore matrix shape.
if (is.vector(I)) {
I <- as.matrix(I)
colnames(I) <- colnames(Y)
}
XacpwInt <- as.matrix(cbind(Xacp, I))
return(list(XacpwInt = XacpwInt, Int = I, interLength = interLength, ResACP = ResACPvar))
}
# Restrict the genotype matrix X to the SNPs of a chosen subset of genes.
#
# Args:
#   genes: character vector of gene names to keep.
#   X: genotype matrix whose columns are named by SNP.
#   listGenesSNP: named list mapping every gene to its SNP column names.
# Returns:
#   list(Xred = reduced genotype matrix (columns = selected SNPs),
#        groupsGenes = gene label for each kept SNP column,
#        listGenesRed = gene -> SNPs mapping restricted to `genes`).
choixGenes <- function(genes, X, listGenesSNP) {
  genesLength <- vapply(listGenesSNP, length, integer(1))
  nbSNPNames <- sum(genesLength)
  nbSNP <- dim(X)[2]
  if (nbSNPNames < nbSNP) {
    # Signal a real warning condition (catchable/suppressible) instead of print().
    warning("SNPs from matrix X are not referenced in the gene list", call. = FALSE)
  }
  SNPaGarder <- c()
  groupsGenes <- c()
  listGenesRed <- c()
  for (i in seq_along(genes)) {
    SNPaGarder <- c(SNPaGarder, unlist(listGenesSNP[genes[[i]]]))
    groupsGenes <- c(groupsGenes, rep(genes[[i]], genesLength[genes[[i]]]))
    listGenesRed <- c(listGenesRed, listGenesSNP[genes[[i]]])
  }
  Xred <- as.matrix(X[, SNPaGarder])
  colnames(Xred) <- SNPaGarder
  list(Xred = Xred, groupsGenes = groupsGenes, listGenesRed = listGenesRed)
}
# Build interaction variables between every pair of genes using PLS.
#
# For each gene pair (i, j), a PLS regression of (Y, SNPs of gene i) on the
# SNPs of gene j is fitted; the PLS score columns serve as the pair's
# interaction variables.
#
# Args:
#   X: genotype matrix (individuals x SNPs), columns named by SNP.
#   Y: phenotype vector.
#   listGenesSNP: named list mapping each gene to its SNP column names.
#   nbcomp: number of PLS components to keep (NULL lets plsr decide).
# Returns:
#   list(Int = data.frame of PLS score columns,
#        interLength = named vector of component counts per gene pair).
PLSGenes <- function(X, Y, listGenesSNP, nbcomp = NULL) {
  nbGenes <- length(listGenesSNP)
  namesGenes <- names(listGenesSNP)
  # Placeholder column/row so cbind/rbind work in the loop; dropped at the end.
  I <- data.frame(matrix(1, nrow = dim(X)[1]))
  d <- data.frame(matrix(1, ncol = 2))
  interLength <- c()
  for (i in 1:(nbGenes - 1)) {
    for (j in (i + 1):nbGenes) {
      g1 <- choixGenes(genes = namesGenes[i], X, listGenesSNP = listGenesSNP)$Xred
      g2 <- choixGenes(genes = namesGenes[j], X, listGenesSNP = listGenesSNP)$Xred
      if (is.null(nbcomp)) {
        pls <- pls::plsr(as.matrix(cbind(Y, g1)) ~ g2, validation = "LOO")
      } else {
        if (dim(cbind(Y, g1))[2] < nbcomp || dim(g2)[2] < nbcomp) {
          # BUG FIX: the cap previously used min(..., dim(g2)) — the whole
          # dim vector — so the row count of g2 leaked into min(); only the
          # column count is relevant here. Note the reduced nbcomp carries
          # over to subsequent gene pairs (original behavior).
          nbcomp <- min(dim(cbind(Y, g1))[2], dim(g2)[2])
        }
        pls <- pls::plsr(as.matrix(cbind(Y, g1)) ~ g2, ncomp = nbcomp, validation = "LOO")
      }
      scPLS <- pls::scores(pls)
      nbCompPLS <- dim(scPLS)[2]
      scPLS <- data.frame(scPLS[1:dim(X)[1], 1:nbCompPLS])
      colnames(scPLS) <- paste(namesGenes[i], namesGenes[j], seq_len(nbCompPLS), sep = ".")
      names(nbCompPLS) <- paste("X", namesGenes[i], namesGenes[j], sep = ".")
      interLength <- c(interLength, nbCompPLS)
      I <- cbind(I, scPLS)
      d <- rbind(d, c(i, j))
    }
  }
  I <- I[, -1]
  d <- d[-1, ]
  return(list(Int = I, interLength = interLength))
}
| /R/InteractionMethods.r | no_license | xiyuansun/GGEE | R | false | false | 10,078 | r | #' @useDynLib GGEE
#' Build one Gene-Gene Eigen-Epistasis (GGEE) interaction variable per gene pair
#'
#' For every pair of genes, the SNP-by-SNP interaction matrix W is computed by
#' the compiled helper \code{IntProd}, standardized, and projected onto the
#' unit vector proportional to the cross-covariance of W with the phenotype Y,
#' yielding a single interaction component per gene pair.
#'
#' @param X Genotype matrix (individuals x SNPs); columns named by SNP.
#' @param Y Phenotype vector of length \code{nrow(X)}.
#' @param listGenesSNP Named list mapping each gene to its SNP column names.
#' @return A list with \code{Int}, a standardized matrix holding one
#'   interaction column per gene pair, and \code{interLength}, a named vector
#'   of 1s (number of components per pair).
#' @importFrom Rcpp sourceCpp
GGEE <- function(X, Y, listGenesSNP) {
  genes <- names(listGenesSNP)
  # Keep every SNP of each gene (portionSNP = 1, no explicit causal count).
  NamesSNP <- sapply(genes, function(x) SNPinGene(x, 1, nbSNPcausaux = NULL, listGenes = listGenesSNP))
  if (is.matrix(NamesSNP)) {
    NamesSNP <- as.list(data.frame(NamesSNP))
  }
  # Seed column of NAs so cbind() works inside the loop; dropped afterwards.
  Z <- matrix(, nrow = dim(X)[1])
  for (i in 1:(length(genes) - 1)) {
    X1 <- as.matrix(X[, as.character(NamesSNP[[i]])])
    for (k in (i + 1):length(genes)) {
      X2 <- as.matrix(X[, as.character(NamesSNP[[k]])])
      # Progress report: use message() (stderr condition) rather than print().
      message(paste("X", genes[i], genes[k], sep = "."))
      resProd <- IntProd(X1, X2, i, k)
      W <- as.matrix(resProd$W)
      colnames(W) <- resProd$names
      W <- scale(W, center = TRUE, scale = TRUE)
      A <- as.vector(t(W) %*% Y)
      # BUG FIX: the previous `A / sqrt(A %*% A)` fails whenever length(A) > 1,
      # because a plain vector cannot be divided by the 1x1 matrix returned by
      # %*% ("dims do not match the length of object"). Use the scalar norm.
      u <- A / sqrt(sum(A^2))
      z <- W %*% u
      colnames(z) <- paste("X", genes[i], genes[k], sep = ".")
      Z <- cbind(Z, z)
    }
  }
  Z <- as.matrix(Z[, -1])  # drop the NA seed column
  Z <- scale(Z, center = TRUE, scale = TRUE)
  interLength <- rep(1, dim(Z)[2])
  names(interLength) <- colnames(Z)
  return(list(Int = Z, interLength = interLength))
}
# Return the "causal" SNP names of one gene: the first nbSNPcausaux SNPs, or
# the first (portionSNP * number of SNPs) of them when nbSNPcausaux is NULL.
# A gene carrying a single SNP is returned as-is.
#
# Args:
#   gene: gene name (key into listGenes).
#   portionSNP: fraction of the gene's SNPs to keep (used only when
#     nbSNPcausaux is NULL).
#   nbSNPcausaux: explicit number of SNPs to keep, or NULL.
#   listGenes: named list mapping gene names to SNP name vectors.
# Returns: character vector of the selected SNP names.
SNPinGene <- function(gene, portionSNP, nbSNPcausaux, listGenes) {
  snps <- listGenes[[gene]]
  if (length(snps) == 1) {
    return(snps)
  }
  nCausal <- if (is.null(nbSNPcausaux)) length(snps) * portionSNP else nbSNPcausaux
  snps[1:nCausal]
}
# Build interaction variables between two groups of variables (two genes)
# via canonical correlation analysis (CCA, stats::cancor).
#
# For each of the first min(nbcomp1, nbcomp2) canonical dimensions, the
# interaction variable is the elementwise product of the two canonical
# variates Z1 = X1 a1 and Z2 = X2 a2.
#
# Args:
#   X1, X2: numeric matrices (individuals x variables) or plain vectors
#           (single-variable group).
#   nbcomp: requested number of canonical components; capped below by the
#           number of coefficient columns cancor() actually returns.
#   nameX1, nameX2: group (gene) names used to label the output columns.
# Returns:
#   list(Int = matrix/data.frame of interaction columns,
#        nbVar = named count of those columns).
between <- function(X1, X2, nbcomp, nameX1, nameX2) {
nbcomp1 = nbcomp
nbcomp2 = nbcomp
nbrow = length(X1)
# Case 1: both groups are matrices. A first cancor() pass identifies
# variables that receive no coefficient (rank deficiency) so they can be
# removed before the final decomposition.
if (is.vector(X1) == FALSE & is.vector(X2) == FALSE) {
nbrow = dim(X1)[1]
colnames(X1) <- c(1:dim(X1)[2])
colnames(X2) <- c(1:dim(X2)[2])
cca <- cancor(X1, X2)
a1 <- cca$xcoef[, 1]
a2 <- cca$ycoef[, 1]
if (length(a1) < dim(X1)[2]) {
n1 <- which( !(1:dim(X1)[2] %in% as.numeric(names(a1))))
X1 <- X1[, -n1]
# drop X1 variables that received no coefficient in a1
}
if (length(a2) < dim(X2)[2]) {
n2 <- which(!(1:dim(X2)[2] %in% as.numeric(names(a2))))
X2 <- X2[, -n2]
}
}
# Case 2: X1 is a single variable, X2 a matrix; only X2 may need pruning.
if (is.vector(X1) == TRUE & is.vector(X2) == FALSE) {
nbrow = length(X1)
colnames(X2) <- c(1:dim(X2)[2])
cca <- cancor(X1, X2)
a1 <- cca$xcoef[, 1]
a2 <- cca$ycoef[, 1]
if (length(a2) < dim(X2)[2]) {
n2 <- which(!(1:dim(X2)[2] %in% as.numeric(names(a2))))
X2 <- X2[, -n2]
}
}
# Case 3: X1 is a matrix, X2 a single variable; only X1 may need pruning.
if (is.vector(X1) == FALSE & is.vector(X2) == TRUE) {
nbrow = dim(X1)[1]
colnames(X1) <- c(1:dim(X1)[2])
cca <- cancor(X1, X2)
a1 <- cca$xcoef[, 1]
a2 <- cca$ycoef[, 1]
if (length(a1) < dim(X1)[2]) {
n1 <- which(!(1:dim(X1)[2] %in% as.numeric(names(a1))))
X1 <- X1[, -n1]
}
}
# Final CCA on the (possibly reduced) groups; cap the requested component
# counts at what the decomposition can actually provide.
cca2 <- cancor(X1, X2)
if (dim(cca2$xcoef)[2] < nbcomp1) {
nbcomp1 = dim(cca2$xcoef)[2]
}
if (dim(cca2$ycoef)[2] < nbcomp2) {
nbcomp2 = dim(cca2$ycoef)[2]
}
A1 <- cca2$xcoef[, 1:nbcomp1]
A2 <- cca2$ycoef[, 1:nbcomp2]
# Placeholder column so cbind() works inside the loop; dropped afterwards.
I <- data.frame(matrix(1, nrow = nbrow))
for (i in seq(min(nbcomp1, nbcomp2))) {
a1 <- as.matrix(A1)[, i]
a2 <- as.matrix(A2)[, i]
# Canonical variates for dimension i of each group.
Z1 <- t(a1 %*% t(X1))
Z2 <- t(a2 %*% t(X2))
# Interaction = elementwise product of the two variates.
int <- Z1 * Z2
int <- data.frame(int)
names(int) <- paste(nameX1, nameX2, i, i, sep = ".")
I <- cbind(I, int)
}
I <- I[, -1]
# A single interaction column collapses to a vector; restore matrix shape.
if (is.vector(I)) {
I <- as.matrix(I)
colnames(I) <- names(int)
}
# nbVar <- nbcomp
nbVar <- dim(I)[2]
names(nbVar) <- paste("X", nameX1, nameX2, sep = ".")
return(list(Int = I, nbVar = nbVar))
}
## Build all pairwise between-gene interaction blocks for a SNP matrix.
##
## X       SNP matrix (individuals x SNPs).
## G       vector giving, for each column of X, the gene it belongs to.
## nbcomp  number of canonical components per gene pair (defaults to 1
##         when NULL).
##
## Returns list(XBet = all interaction columns side by side, ident = the
## (i, k) gene-pair index of each block, interLength = number of columns
## per pair, nbVar = column count of the last pair processed).
between.mat <- function(X, G, nbcomp) {
  if (is.null(nbcomp)) {
    nbcomp = 1
  }
  genes <- unique(G)
  nbGroup <- length(levels(as.factor(G)))
  B <- data.frame(matrix(1, nrow = dim(X)[1]))  # dummy column, removed below
  d <- data.frame(matrix(1, ncol = 2))          # dummy row, removed below
  interLength <- c()
  ## Loop over unordered gene pairs (i, k) with i < k.
  for (i in 1:(nbGroup - 1)) {
    f <- i + 1
    for (k in f:nbGroup) {
      X1 <- X[, G == genes[i]] # first group of variables
      X2 <- X[, G == genes[k]] # second group of variables
      betw <- between(X1, X2, nbcomp, nameX1 = genes[i], nameX2 = genes[k])
      newVar <- betw$Int
      # newVar<-data.frame(newVar) colnames(newVar) <-paste('X', genes[i], genes[k],
      # sep='.')
      B <- data.frame(B, newVar)
      d <- rbind(d, c(i, k))
      nbVar <- betw$nbVar
      # names(nbVar) <- paste(genes[i], genes[k], sep='.')
      interLength <- c(interLength, nbVar)
    }
  }
  nam <- colnames(B)[-1]
  B <- as.data.frame(B[, -1])
  colnames(B) <- nam
  d <- d[-1, ]
  ## NOTE(review): `nbVar` holds the value from the *last* pair only;
  ## the per-pair counts are accumulated in `interLength`.
  return(list(XBet = B, ident = d, interLength = interLength, nbVar = nbVar))
}
## Per-gene PCA followed by pairwise products of the retained components.
##
## X            SNP matrix (individuals x SNPs, columns named by SNP id).
## listGenesSNP named list mapping each gene to its SNP ids.
## nbcomp       number of principal components kept per gene (capped at
##              the gene's SNP count; single-SNP genes keep the raw column).
##
## Returns list(XacpwInt = PCA scores plus interaction columns,
## Int = interaction columns only, interLength = number of interaction
## columns per gene pair, ResACP = PCA eigenvalue tables per gene).
PCAGenes <- function(X, listGenesSNP, nbcomp) {
  nbGenes <- length(listGenesSNP)
  namesGenes <- names(listGenesSNP)
  Xacp <- c()
  allnbComp <- c()
  ResACPvar <- list()
  ## PCA of each gene's SNP block; keep up to nbcomp component scores.
  for (i in seq(namesGenes)) {
    red <- choixGenes(genes = namesGenes[i], X, listGenesSNP = listGenesSNP)
    Xred <- red$Xred
    if(dim(Xred)[2]== 1){
      ## Single SNP: no PCA possible, keep the raw column as "<gene>.1".
      newVar <- Xred
      colnames(newVar) <- paste(namesGenes[i], 1, sep =".")
      allnbComp <-c(allnbComp,1)
    }else{
      ResACP <- FactoMineR::PCA(Xred, scale.unit = TRUE, ncp = dim(Xred)[2], graph = F)
      eig <- list(ResACP$eig)
      names(eig) <- namesGenes[i]
      ResACPvar <- c(ResACPvar, eig)
      ## Cannot keep more components than the gene has SNPs.
      if (dim(Xred)[2]< nbcomp){
        nbComp <- dim(Xred)[2]
      }else{
        nbComp <- nbcomp
      }
      allnbComp <- c(allnbComp, nbComp)
      newVar <- as.matrix(ResACP$ind$coord[, c(1:nbComp)])
      namesVar <- c()
      for (j in seq(nbComp)) {
        namesVar <- c(namesVar, paste(namesGenes[i], j, sep = "."))
      }
      colnames(newVar) <- namesVar
    }
    Xacp <- cbind(Xacp, newVar)
  }
  G <- rep(seq(nbGenes), allnbComp)  # gene index of every score column
  I <- data.frame(matrix(1, nrow = dim(Xacp)[1]))  # dummy column, removed below
  interLength <- c()
  d <- data.frame(matrix(1, ncol = 2))  # dummy row, removed below
  c <- c()  # NOTE(review): never used afterwards (and shadows base::c by name)
  ## Pairwise products of component scores between every pair of genes.
  for (g1 in 1:(nbGenes - 1)) {
    for (g2 in (g1 + 1):nbGenes) {
      nbVar1 <- allnbComp[g1]
      nbVar2 <- allnbComp[g2]
      for (i in seq(nbVar1)) {
        for (k in seq(nbVar2)) {
          X1 <- as.matrix(Xacp[, G == g1])[, i]
          X2 <- as.matrix(Xacp[, G == g2])[, k]
          Y <- X1 * X2
          Y <- data.frame(Y)
          colnames(Y) <- paste(namesGenes[g1], namesGenes[g2], i, k, sep = ".")
          I <- cbind(I, Y)
        }
      }
      nbVar <- nbVar1 * nbVar2
      names(nbVar) <- paste("X", namesGenes[g1], namesGenes[g2], sep = ".")
      interLength <- c(interLength, nbVar)
      d <- rbind(d, c(g1, g2))
    }
  }
  I <- I[, -1]
  d <- d[-1, ]
  ## A single interaction column collapses to a vector; restore matrix form.
  if (is.vector(I)) {
    I <- as.matrix(I)
    colnames(I) <- colnames(Y)
  }
  XacpwInt <- as.matrix(cbind(Xacp, I))
  return(list(XacpwInt = XacpwInt, Int = I, interLength = interLength, ResACP = ResACPvar))
}
# Select a reduced subset of genes (and their SNP columns) from a dataset
# Subset the SNP matrix X down to the SNPs of the requested genes.
#
# genes:        character vector of gene names to keep
# X:            SNP matrix (columns named by SNP id)
# listGenesSNP: named list mapping every gene to its SNP ids
#
# Returns Xred (the reduced matrix), groupsGenes (the gene label of each
# kept SNP column) and listGenesRed (the gene -> SNP list restricted to
# `genes`).
choixGenes <- function(genes, X, listGenesSNP){
  genesLength <- vapply(listGenesSNP, length, integer(1))
  # Sanity check: every column of X should be accounted for in the gene list.
  if (sum(genesLength) < dim(X)[2]) {
    print("Warning, SNPs from matrix X are not referenced in the gene list")
  }
  # Vectorised equivalents of the per-gene accumulation loop.
  listGenesRed <- listGenesSNP[genes]
  SNPaGarder <- unlist(listGenesRed)
  groupsGenes <- rep(genes, genesLength[genes])
  Xred <- as.matrix(X[, SNPaGarder])
  colnames(Xred) <- SNPaGarder
  list(Xred = Xred, groupsGenes = groupsGenes, listGenesRed = listGenesRed)
}
## Pairwise PLS-based interaction variables between genes.
##
## For every pair of genes (i, j), fits a PLS regression of (Y, SNPs of
## gene i) on the SNPs of gene j (pls::plsr, leave-one-out validation) and
## keeps the X-scores as interaction variables "<genei>.<genej>.<k>".
##
## X            SNP matrix (individuals x SNPs, columns named by SNP id).
## Y            phenotype vector/matrix, prepended to gene i's block.
## listGenesSNP named list mapping each gene to its SNP ids.
## nbcomp       number of PLS components, or NULL to let plsr decide.
##
## Returns list(Int = all score columns side by side, interLength = number
## of components kept per gene pair, named "X.<genei>.<genej>").
PLSGenes <- function(X, Y, listGenesSNP, nbcomp = NULL) {
  nbGenes <- length(listGenesSNP)
  namesGenes <- names(listGenesSNP)
  I <- data.frame(matrix(1, nrow = dim(X)[1]))  # dummy column, removed below
  d <- data.frame(matrix(1, ncol = 2))          # dummy row, removed below
  interLength <- c()
  for (i in 1:(nbGenes - 1)) {
    for (j in (i + 1):nbGenes) {
      red1 <- choixGenes(genes = namesGenes[i], X, listGenesSNP = listGenesSNP)
      g1 <- red1$Xred
      red2 <- choixGenes(genes = namesGenes[j], X, listGenesSNP = listGenesSNP)
      g2 <- red2$Xred
      if (is.null(nbcomp)) {
        pls = pls::plsr(as.matrix(cbind(Y, g1)) ~ g2, validation = "LOO")
      } else {
        ## Cap nbcomp at the narrower block. BUG FIX: the original wrote
        ## min(..., dim(g2)), which compared against *both* dimensions of
        ## g2 (rows included); only the column count is relevant here.
        if (dim(cbind(Y, g1))[2] < nbcomp | dim(g2)[2] < nbcomp) {
          nbcomp = min(dim(cbind(Y, g1))[2], dim(g2)[2])
        }
        pls = pls::plsr(as.matrix(cbind(Y, g1)) ~ g2, ncomp = nbcomp, validation = "LOO")
      }
      scPLS <- pls::scores(pls)
      nbCompPLS <- dim(scPLS)[2]
      scPLS <- data.frame(scPLS[1:dim(X)[1], 1:nbCompPLS])
      noms <- c()
      for (k in seq(nbCompPLS)) {
        noms <- c(noms, paste(namesGenes[i], namesGenes[j], k, sep = "."))
      }
      colnames(scPLS) <- noms
      names(nbCompPLS) <- paste("X", namesGenes[i], namesGenes[j], sep = ".")
      interLength <- c(interLength, nbCompPLS)
      I = cbind(I, scPLS)
      d <- rbind(d, c(i, j))
    }
  }
  I <- I[, -1]
  d <- d[-1, ]
  return(list(Int = I, interLength = interLength))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{filterMEData}
\alias{filterMEData}
\title{Function to filter the MRexperiment data by numerical parameters}
\usage{
filterMEData(MRobj, minpresence = 1, minfeats = 2, minreads = 2)
}
\arguments{
\item{MRobj}{MRExperiment object to filter}
\item{minpresence}{minimum sample presence per feature}
\item{minfeats}{minimum number of features per sample}
\item{minreads}{minimum number of reads per sample}
}
\value{
the filtered MRobj
}
\description{
Function to filter the MRexperiment data by numerical parameters
}
\examples{
data("mouseData", package = "metagenomeSeq")
filterMEData(MRobj = mouseData, minpresence = 4, minfeats = 300)
}
\author{
Janina Reeder
}
| /man/filterMEData.Rd | permissive | saracg-forks/microbiomeExplorer | R | false | true | 762 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{filterMEData}
\alias{filterMEData}
\title{Function to filter the MRexperiment data by numerical parameters}
\usage{
filterMEData(MRobj, minpresence = 1, minfeats = 2, minreads = 2)
}
\arguments{
\item{MRobj}{MRExperiment object to filter}
\item{minpresence}{minimum sample presence per feature}
\item{minfeats}{minimum number of features per sample}
\item{minreads}{minimum number of reads per sample}
}
\value{
the filtered MRobj
}
\description{
Function to filter the MRexperiment data by numerical parameters
}
\examples{
data("mouseData", package = "metagenomeSeq")
filterMEData(MRobj = mouseData, minpresence = 4, minfeats = 300)
}
\author{
Janina Reeder
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/module-pickerGroup.R
\name{pickerGroup-module}
\alias{pickerGroup-module}
\alias{pickerGroupUI}
\alias{pickerGroupServer}
\title{Picker Group}
\usage{
pickerGroupUI(id, params, label = NULL, btn_label = "Reset filters",
options = list())
pickerGroupServer(input, output, session, data, vars)
}
\arguments{
\item{id}{Module's id.}
\item{params}{a named list of parameters passed to each `pickerInput`, you can use :
`inputId` (obligatory, must be variable name), `label`, `placeholder`.}
\item{label}{character, global label on top of all labels.}
\item{btn_label}{reset button label.}
\item{options}{See \code{\link{pickerInput}} options argument.}
\item{input}{standard \code{shiny} input.}
\item{output}{standard \code{shiny} output.}
\item{session}{standard \code{shiny} session.}
\item{data}{a \code{data.frame}, or an object coercible to \code{data.frame}.}
\item{vars}{character, columns to use to create filters,
must correspond to variables listed in \code{params}.}
}
\value{
a \code{reactive} function containing data filtered.
}
\description{
Group of mutually dependent `pickerInput` for filtering data.frame's columns.
}
\examples{
\dontrun{
if (interactive()) {
library(shiny)
library(shinyWidgets)
data("mpg", package = "ggplot2")
ui <- fluidPage(
fluidRow(
column(
width = 10, offset = 1,
tags$h3("Filter data with picker group"),
panel(
pickerGroupUI(
id = "my-filters",
params = list(
manufacturer = list(inputId = "manufacturer", title = "Manufacturer:"),
model = list(inputId = "model", title = "Model:"),
trans = list(inputId = "trans", title = "Trans:"),
class = list(inputId = "class", title = "Class:")
)
), status = "primary"
),
dataTableOutput(outputId = "table")
)
)
)
server <- function(input, output, session) {
res_mod <- callModule(
module = pickerGroupServer,
id = "my-filters",
data = mpg,
vars = c("manufacturer", "model", "trans", "class")
)
output$table <- renderDataTable(res_mod())
}
shinyApp(ui, server)
}
}
}
| /man/pickerGroup-module.Rd | permissive | DataXujing/shinyWidgets | R | false | true | 2,207 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/module-pickerGroup.R
\name{pickerGroup-module}
\alias{pickerGroup-module}
\alias{pickerGroupUI}
\alias{pickerGroupServer}
\title{Picker Group}
\usage{
pickerGroupUI(id, params, label = NULL, btn_label = "Reset filters",
options = list())
pickerGroupServer(input, output, session, data, vars)
}
\arguments{
\item{id}{Module's id.}
\item{params}{a named list of parameters passed to each `pickerInput`, you can use :
`inputId` (obligatory, must be variable name), `label`, `placeholder`.}
\item{label}{character, global label on top of all labels.}
\item{btn_label}{reset button label.}
\item{options}{See \code{\link{pickerInput}} options argument.}
\item{input}{standard \code{shiny} input.}
\item{output}{standard \code{shiny} output.}
\item{session}{standard \code{shiny} session.}
\item{data}{a \code{data.frame}, or an object coercible to \code{data.frame}.}
\item{vars}{character, columns to use to create filters,
must correspond to variables listed in \code{params}.}
}
\value{
a \code{reactive} function containing data filtered.
}
\description{
Group of mutually dependent `pickerInput` for filtering data.frame's columns.
}
\examples{
\dontrun{
if (interactive()) {
library(shiny)
library(shinyWidgets)
data("mpg", package = "ggplot2")
ui <- fluidPage(
fluidRow(
column(
width = 10, offset = 1,
tags$h3("Filter data with picker group"),
panel(
pickerGroupUI(
id = "my-filters",
params = list(
manufacturer = list(inputId = "manufacturer", title = "Manufacturer:"),
model = list(inputId = "model", title = "Model:"),
trans = list(inputId = "trans", title = "Trans:"),
class = list(inputId = "class", title = "Class:")
)
), status = "primary"
),
dataTableOutput(outputId = "table")
)
)
)
server <- function(input, output, session) {
res_mod <- callModule(
module = pickerGroupServer,
id = "my-filters",
data = mpg,
vars = c("manufacturer", "model", "trans", "class")
)
output$table <- renderDataTable(res_mod())
}
shinyApp(ui, server)
}
}
}
|
plot2 <- function(data)
{
par(mfrow = c(1,1))
with(data,plot(DateTime,Global_active_power,type="l",ylab="Global Active Power (kilowatts)", xlab=""))
dev.copy(png,file="plot2.png",width = 480, height = 480)
dev.off()
} | /plot2.R | no_license | Invictus666/ExData_Plotting1 | R | false | false | 225 | r | plot2 <- function(data)
{
par(mfrow = c(1,1))
with(data,plot(DateTime,Global_active_power,type="l",ylab="Global Active Power (kilowatts)", xlab=""))
dev.copy(png,file="plot2.png",width = 480, height = 480)
dev.off()
} |
hpc <- read.table("household_power_consumption.txt",
skip = 66637, nrow = 2880, sep = ";",na.strings="?" ,
stringsAsFactors=FALSE,
col.names = colnames(read.table(
"household_power_consumption.txt",
nrow = 1, header = TRUE, sep=";")))
#hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y")
hpc <- cbind(hpc, DateTime=paste(hpc$Date,hpc$Time, sep=" "))
hpc$DateTime <- strptime(hpc$DateTime, "%d/%m/%Y %H:%M:%S")
#plot 2
png(filename ="plot2.png",width = 480, height = 480)
plot(hpc$DateTime,hpc$Global_active_power,type="l",ylab="Global Active Power (kilowatts)",xlab="")
dev.off() | /plot2.R | no_license | rodbs/ExData_Plotting1 | R | false | false | 669 | r | hpc <- read.table("household_power_consumption.txt",
skip = 66637, nrow = 2880, sep = ";",na.strings="?" ,
stringsAsFactors=FALSE,
col.names = colnames(read.table(
"household_power_consumption.txt",
nrow = 1, header = TRUE, sep=";")))
#hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y")
hpc <- cbind(hpc, DateTime=paste(hpc$Date,hpc$Time, sep=" "))
hpc$DateTime <- strptime(hpc$DateTime, "%d/%m/%Y %H:%M:%S")
#plot 2
png(filename ="plot2.png",width = 480, height = 480)
plot(hpc$DateTime,hpc$Global_active_power,type="l",ylab="Global Active Power (kilowatts)",xlab="")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bbox_tran.R
\name{bbox_tran}
\alias{bbox_tran}
\title{Generate bounding box for Neotoma}
\usage{
bbox_tran(x, coord_formula = "~ x + y", from, to)
}
\arguments{
\item{x}{A \code{data.frame} or \code{matrix} with the vegetation data and its coordinates.}
\item{coord_formula}{The formula, as a string}
\item{from}{The source \code{proj4} projection string of the input coordinates (e.g., \code{+init=epsg:4121 +proj=longlat +ellps=GRS80}).}
\item{to}{The target \code{proj4} projection string (e.g., \code{+init=epsg:3175}).}
}
\value{
A \code{numeric} vector, of length 4.
}
\description{
From a vegetation matrix with \code{x} and \code{y} columns (or \code{lat}/\code{long}), generate a bounding box to be used for the \code{loc} parameter in the \code{neotoma} function \code{get_dataset()}.
}
\examples{
{
data(plss_vegetation)
pol_box <- bbox_tran(plss_vegetation, '~ x + y',
'+init=epsg:3175',
'+init=epsg:4326')
}
}
| /man/bbox_tran.Rd | permissive | amwillson/stepps-cal | R | false | true | 981 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bbox_tran.R
\name{bbox_tran}
\alias{bbox_tran}
\title{Generate bounding box for Neotoma}
\usage{
bbox_tran(x, coord_formula = "~ x + y", from, to)
}
\arguments{
\item{x}{A \code{data.frame} or \code{matrix} with the vegetation data and its coordinates.}
\item{coord_formula}{The formula, as a string}
\item{from}{The object \code{proj4} string (e.g., \code{+init=epsg:4121 +proj=longlat +ellps=GRS80}).}
\item{to}{The target \code{proj4} projection string (e.g., \code{+init=epsg:3175}).}
}
\value{
A \code{numeric} vector, of length 4.
}
\description{
From a vegetation matrix with \code{x} and \code{y} columns (or \code{lat}/\code{long}), generate a bounding box to be used for the \code{loc} parameter in the \code{neotoma} function \code{get_dataset()}.
}
\examples{
{
data(plss_vegetation)
pol_box <- bbox_tran(plss_vegetation, '~ x + y',
'+init=epsg:3175',
'+init=epsg:4326')
}
}
|
library(imager)
img.pic=load.image("/home/prinzz/Desktop/Work/Digital Image Processing/gitHub/test4.jpeg")
dim(img.pic)
size=rep(256,2)
img.resized=as.matrix(resize((img.pic),size[1],size[2]),size[1],size[2])
# img.resized2=as.matrix(resize(grayscale(img.pic),size[1],size[2]),size[1],size[2])
plot(as.cimg(img.resized))
img.padded=matrix(0,nrow(img.resized)*2,ncol(img.resized)*2)
P=nrow(img.padded)
Q=ncol(img.padded)
for (i in 1:nrow(img.resized)) {
for (j in 1:ncol(img.resized)) {
img.padded[i,j] = img.resized[i,j]
temp=i+j
# img.padded[i,j]=((-1)^temp)*img.padded[i,j]
}
}
plot(as.cimg(img.padded))
img.fft=fft(img.padded)
img.log=log(1+abs(img.fft))
plot(as.cimg(img.log))
kernel.mat=matrix(0,nrow(img.fft),ncol(img.fft))
distance=kernel.mat
nKernel=nrow(kernel.mat)
mKernel=ncol(kernel.mat)
for (i in 1:nKernel) {
for (j in 1:mKernel) {
temp=i+j
# img.fft[i,j] = (((-1)^temp)*img.fft[i,j])
}
}
for (i in 1:nKernel) {
for (j in 1:mKernel) {
distance[i,j] = sqrt((i-(P/2))^2+(j-(Q/2))^2)
}
}
cutOff <- 40
for (i in 1:nKernel) {
for (j in 1:mKernel) {
#
if(distance[i,j]>cutOff){ #IDEAL PASS FILTER
kernel.mat[i,j]=0
}else{
kernel.mat[i,j]=1
}
# kernel.mat[i,j]=(1/(1 + (distance[i,j]/cutOff)^4)) #BUTTERWORTH FILTER
# kernel.mat[i,j]=exp((-(distance[i,j])^2)/(2*(cutOff^2))) #GAUSSIAN FILTER
}
}
kernel.fft = fft(kernel.mat)
img.filtered = (kernel.fft)*img.fft
## Take the real part of the filtered spectrum and undo the frequency
## centering with the checkerboard sign (-1)^(i+j).
## BUG FIX: the original wrote (-1^(i+j)), which R parses as -(1^(i+j)),
## i.e. a constant -1 for every (i, j) (unary minus binds more loosely
## than ^), so the whole image was merely negated. The commented-out
## centering code earlier in this script uses ((-1)^temp), confirming the
## checkerboard sign was intended.
for (i in 1:nrow(img.filtered)) {
  for (j in 1:ncol(img.filtered)) {
    img.filtered[i,j] = Re(img.filtered[i,j]) * ((-1)^(i+j))
  }
}
## Inverse FFT back to the spatial domain (normalised by the element count).
img.ifft = Re(fft((img.filtered), inverse = TRUE))/length(img.filtered)
## Apply the same de-centering sign flip in the spatial domain.
for (i in 1:nrow(img.ifft) ) {
  for (j in 1:ncol(img.ifft)) {
    img.ifft[i,j] = (img.ifft[i,j]) * ((-1)^(i+j))
  }
}
for (i in 1:(nrow(img.resized))) {
for (j in 1:(ncol(img.resized))) {
img.resized[i,j]=img.ifft[i,j]
}
}
# plot(as.cimg(Re(img.resized)+img.resized2))
img.log=log(1+abs(img.filtered))
# plot(as.cimg(img.log),rescale = FALSE)
plot(as.cimg(Re(img.resized)))
| /F.R | no_license | prinzz1208/acad-DIP | R | false | false | 2,044 | r | library(imager)
img.pic=load.image("/home/prinzz/Desktop/Work/Digital Image Processing/gitHub/test4.jpeg")
dim(img.pic)
size=rep(256,2)
img.resized=as.matrix(resize((img.pic),size[1],size[2]),size[1],size[2])
# img.resized2=as.matrix(resize(grayscale(img.pic),size[1],size[2]),size[1],size[2])
plot(as.cimg(img.resized))
img.padded=matrix(0,nrow(img.resized)*2,ncol(img.resized)*2)
P=nrow(img.padded)
Q=ncol(img.padded)
for (i in 1:nrow(img.resized)) {
for (j in 1:ncol(img.resized)) {
img.padded[i,j] = img.resized[i,j]
temp=i+j
# img.padded[i,j]=((-1)^temp)*img.padded[i,j]
}
}
plot(as.cimg(img.padded))
img.fft=fft(img.padded)
img.log=log(1+abs(img.fft))
plot(as.cimg(img.log))
kernel.mat=matrix(0,nrow(img.fft),ncol(img.fft))
distance=kernel.mat
nKernel=nrow(kernel.mat)
mKernel=ncol(kernel.mat)
for (i in 1:nKernel) {
for (j in 1:mKernel) {
temp=i+j
# img.fft[i,j] = (((-1)^temp)*img.fft[i,j])
}
}
for (i in 1:nKernel) {
for (j in 1:mKernel) {
distance[i,j] = sqrt((i-(P/2))^2+(j-(Q/2))^2)
}
}
cutOff <- 40
for (i in 1:nKernel) {
for (j in 1:mKernel) {
#
if(distance[i,j]>cutOff){ #IDEAL PASS FILTER
kernel.mat[i,j]=0
}else{
kernel.mat[i,j]=1
}
# kernel.mat[i,j]=(1/(1 + (distance[i,j]/cutOff)^4)) #BUTTERWORTH FILTER
# kernel.mat[i,j]=exp((-(distance[i,j])^2)/(2*(cutOff^2))) #GAUSSIAN FILTER
}
}
kernel.fft = fft(kernel.mat)
img.filtered = (kernel.fft)*img.fft
for (i in 1:nrow(img.filtered)) {
for (j in 1:ncol(img.filtered)) {
img.filtered[i,j]=Re(img.filtered[i,j])*(-1^(i+j))
}
}
img.ifft = Re(fft((img.filtered), inverse = TRUE))/length(img.filtered)
for (i in 1:nrow(img.ifft) ) {
for (j in 1:ncol(img.ifft)) {
img.ifft[i,j]=(img.ifft[i,j])*(-1^(i+j))
}
}
for (i in 1:(nrow(img.resized))) {
for (j in 1:(ncol(img.resized))) {
img.resized[i,j]=img.ifft[i,j]
}
}
# plot(as.cimg(Re(img.resized)+img.resized2))
img.log=log(1+abs(img.filtered))
# plot(as.cimg(img.log),rescale = FALSE)
plot(as.cimg(Re(img.resized)))
|
## Fabrizia Ronco
## April 2020
############################################################################
#### check Bayes Traits runs for convergence
### load packages
require(coda) # version 0.19-3
### allow variables from command line
args = commandArgs(trailingOnly=TRUE)
### set initial burnin. | the parfile of the variable rates model alread defines a burnin, only after which the chain is sampled.
###. this script tests if the chain has converged. if the chain has converged it created a file "additional_burinin" to define for downstream analyses how many of the posterior samples to be used
burnin =0
cropat=4000
### specify file to ue
dirname=args[1]
filename=args[2]
TREE=args[3]
TRAIT=args[4]
AXES=args[5]
### read the chain from the Bayes Traits run
d = scan(paste(dirname, filename, ".Log.txt", sep=""), what="numeric", sep="\t")
startH = grep("Iteration", d)[2]
endH = grep("Node", d)[2]+1
startD = endH +1
endD=length(d)
dimC =length(d[startH:endH])
dimR =length(d[startD:endD])/dimC
post=as.data.frame(matrix(d[startD:endD],dimR, dimC ,byrow = T))
names(post) = d[startH: endH]
post= post[,-length(post)]
for ( i in c(2,4,5,6,7)) post[,i] = as.numeric(as.character(post[,i]))
######## make plot function to visualze the chain convergence
## Diagnostic plots for a BayesTraits variable-rates chain: for each of
## log-likelihood, Alpha, Sigma^2 and the number of rate shifts, draw a
## histogram (left) and a trace plot (right) with the linear trend (solid
## black) and the posterior mean (dashed red) overlaid.
##
## input  posterior table parsed from the .Log.txt file (column 2 = Lh,
##        4 = Alpha, 5 = Sigma^2, 6 and 7 = shift counts).
## burnin number of leading samples to drop before plotting (0 = none).
plot_chain = function(input, burnin=0) {
  if(burnin!= 0) {input= input[-c(1:burnin),]}
  ## total number of shifts = sum of the two shift-count columns
  input$NoParam= input[,6] + input[,7]
  par(mfrow=c(4,2), mar=c(5,5,2,2))
  ## log-likelihood (`br` partially matches hist()'s `breaks` argument)
  hist(input $Lh, col="deepskyblue4", border="black" , br=100, main= "", xlab="Lh")
  plot(input $Lh ~ c(1:length(input$Iteration)), pch=16, cex=0.1 , col="deepskyblue4", ylab="Lh", xlab="sample");lines( c(1:length(input$Iteration)), input $Lh, col="deepskyblue4")
  abline( lm(input $Lh ~ c(1:length(input $Iteration)) )); abline( mean(input $Lh), 0, col="red", lty="dashed")
  ## Alpha (column 4)
  hist(input[,4], br=100, col="deepskyblue4", border="black", main= "" , xlab="Alpha")
  plot(input[,4] ~ c(1:length(input$Iteration)), pch=16, cex=0.1, col="deepskyblue4", ylab="Alpha", xlab="sample");lines( c(1:length(input$Iteration)), input[,4], col="deepskyblue4")
  abline( lm(input[,4] ~ c(1:length(input $Iteration)) )); abline( mean(input[,4]), 0, col="red", lty="dashed")
  ## Sigma^2 (column 5)
  hist(input[,5], br=20, col="deepskyblue4", border="black" , main= "", xlab="Sigma^2")
  plot(input[,5] ~ c(1:length(input$Iteration)), pch=16, cex=0.1, col="deepskyblue4", ylab="Sigma^2", xlab="sample");lines( c(1:length(input$Iteration)), input[,5], col="deepskyblue4")
  abline( lm(input[,5] ~ c(1:length(input $Iteration)) )); abline( mean(input[,5]), 0, col="red", lty="dashed")
  ## number of rate shifts
  hist(input$NoParam, br=47, col="deepskyblue4", border="black" , main= "", xlab="number of shifts")
  plot(input $NoParam ~ c(1:length(input$Iteration)), pch=16, cex=0.1, col="deepskyblue4", ylab="number of shifts", xlab="sample");lines( c(1:length(input$Iteration)), input $NoParam, col="deepskyblue4")
  abline( lm(input $NoParam ~ c(1:length(input $Iteration)) )); abline( mean(input $NoParam), 0, col="red", lty="dashed")
}
print(paste("############################## testing ", TREE, " ", TRAIT, " ", AXES, " for convergence ##############################", sep=""))
####. if the chain passes the test, the diagnostic stats is saved to file, the burnin file is generated (defining the posterior sample of the chain), the chain of the posterior sample is plotted and the mean logLikelihood of the posterior sample to be used in further analysis is calculated and written to file
MC2= mcmc(data=post[,-c(1,3)])
HD= heidel.diag(MC2)
if ( if(all(!is.na(HD[,1]))) all(HD[,2] <= cropat) else print("FALSE")){
write.table( HD, paste(dirname, filename, "_Chain_Diag_HD_", burnin, "_burnin.txt", sep=""), sep="\t", quote=F)
write.table(cropat,paste(dirname, "additional_burnin.txt", sep=""),row.name=F, col.name=F)
meanLH=mean(post$Lh[-c(1:cropat)]) ##. calculate mean log-likelihood
write.table(meanLH,paste(dirname, "mean_Lh_VarRates_", cropat, "_burnin.txt", sep=""),row.name=F, col.name=F)
## and mean AIC-scores
post$numpar= post[,6]+ post[,7] +2
post$AIC = (-2*post$Lh)+(2*post$numpar)
meanAIC= mean(post$AIC)
write.table(meanAIC,paste(dirname, "mean_AIC_VarRates_", cropat, "_burnin.txt", sep=""),row.name=F, col.name=F)
print("**** chain converged ****")
print(HD)
print("**** number of posterior samples: ****")
print(length(post$Lh[-c(1:cropat)]))
pdf(file=paste(dirname, filename, "_Chain_plots_burn_", cropat, ".pdf", sep="") )
plot_chain(post, cropat)
dev.off()
}else{ print("**** chain failed to converge ****")}
| /trait_evolution/03_TraitEvolution/scripts/03_testConvergence.R | no_license | cichlidx/ronco_et_al | R | false | false | 4,812 | r | ## Fabrizia Ronco
## April 2020
############################################################################
#### check Bayes Traits runs for convergence
### load packages
require(coda) # version 0.19-3
### allow variables from command line
args = commandArgs(trailingOnly=TRUE)
### set initial burnin. | the parfile of the variable rates model alread defines a burnin, only after which the chain is sampled.
###. this script tests if the chain has converged. if the chain has converged it created a file "additional_burinin" to define for downstream analyses how many of the posterior samples to be used
burnin =0
cropat=4000
### specify file to ue
dirname=args[1]
filename=args[2]
TREE=args[3]
TRAIT=args[4]
AXES=args[5]
### read the chain from the Bayes Traits run
d = scan(paste(dirname, filename, ".Log.txt", sep=""), what="numeric", sep="\t")
startH = grep("Iteration", d)[2]
endH = grep("Node", d)[2]+1
startD = endH +1
endD=length(d)
dimC =length(d[startH:endH])
dimR =length(d[startD:endD])/dimC
post=as.data.frame(matrix(d[startD:endD],dimR, dimC ,byrow = T))
names(post) = d[startH: endH]
post= post[,-length(post)]
for ( i in c(2,4,5,6,7)) post[,i] = as.numeric(as.character(post[,i]))
######## make plot function to visualze the chain convergence
plot_chain = function(input, burnin=0) {
if(burnin!= 0) {input= input[-c(1:burnin),]}
input$NoParam= input[,6] + input[,7]
par(mfrow=c(4,2), mar=c(5,5,2,2))
hist(input $Lh, col="deepskyblue4", border="black" , br=100, main= "", xlab="Lh")
plot(input $Lh ~ c(1:length(input$Iteration)), pch=16, cex=0.1 , col="deepskyblue4", ylab="Lh", xlab="sample");lines( c(1:length(input$Iteration)), input $Lh, col="deepskyblue4")
abline( lm(input $Lh ~ c(1:length(input $Iteration)) )); abline( mean(input $Lh), 0, col="red", lty="dashed")
hist(input[,4], br=100, col="deepskyblue4", border="black", main= "" , xlab="Alpha")
plot(input[,4] ~ c(1:length(input$Iteration)), pch=16, cex=0.1, col="deepskyblue4", ylab="Alpha", xlab="sample");lines( c(1:length(input$Iteration)), input[,4], col="deepskyblue4")
abline( lm(input[,4] ~ c(1:length(input $Iteration)) )); abline( mean(input[,4]), 0, col="red", lty="dashed")
hist(input[,5], br=20, col="deepskyblue4", border="black" , main= "", xlab="Sigma^2")
plot(input[,5] ~ c(1:length(input$Iteration)), pch=16, cex=0.1, col="deepskyblue4", ylab="Sigma^2", xlab="sample");lines( c(1:length(input$Iteration)), input[,5], col="deepskyblue4")
abline( lm(input[,5] ~ c(1:length(input $Iteration)) )); abline( mean(input[,5]), 0, col="red", lty="dashed")
hist(input$NoParam, br=47, col="deepskyblue4", border="black" , main= "", xlab="number of shifts")
plot(input $NoParam ~ c(1:length(input$Iteration)), pch=16, cex=0.1, col="deepskyblue4", ylab="number of shifts", xlab="sample");lines( c(1:length(input$Iteration)), input $NoParam, col="deepskyblue4")
abline( lm(input $NoParam ~ c(1:length(input $Iteration)) )); abline( mean(input $NoParam), 0, col="red", lty="dashed")
}
print(paste("############################## testing ", TREE, " ", TRAIT, " ", AXES, " for convergence ##############################", sep=""))
####. if the chain passes the test, the diagnostic stats is saved to file, the burnin file is generated (defining the posterior sample of the chain), the chain of the posterior sample is plotted and the mean logLikelihood of the posterior sample to be used in further analysis is calculated and written to file
MC2= mcmc(data=post[,-c(1,3)])
HD= heidel.diag(MC2)
if ( if(all(!is.na(HD[,1]))) all(HD[,2] <= cropat) else print("FALSE")){
write.table( HD, paste(dirname, filename, "_Chain_Diag_HD_", burnin, "_burnin.txt", sep=""), sep="\t", quote=F)
write.table(cropat,paste(dirname, "additional_burnin.txt", sep=""),row.name=F, col.name=F)
meanLH=mean(post$Lh[-c(1:cropat)]) ##. calculate mean log-likelihood
write.table(meanLH,paste(dirname, "mean_Lh_VarRates_", cropat, "_burnin.txt", sep=""),row.name=F, col.name=F)
## and mean AIC-scores
post$numpar= post[,6]+ post[,7] +2
post$AIC = (-2*post$Lh)+(2*post$numpar)
meanAIC= mean(post$AIC)
write.table(meanAIC,paste(dirname, "mean_AIC_VarRates_", cropat, "_burnin.txt", sep=""),row.name=F, col.name=F)
print("**** chain converged ****")
print(HD)
print("**** number of posterior samples: ****")
print(length(post$Lh[-c(1:cropat)]))
pdf(file=paste(dirname, filename, "_Chain_plots_burn_", cropat, ".pdf", sep="") )
plot_chain(post, cropat)
dev.off()
}else{ print("**** chain failed to converge ****")}
|
#plot all tracers for a given box and layer
this_run<-"SS3"
# this_run<-"ClimateChange"
this_run<-"TBGB_JP2"
TBGB=FALSE
TBGB=TRUE
this_path<-paste(DIR$'Base',"ATLANTISmodels\\",this_run,"\\",sep="")
if(TBGB==TRUE){
this_path = paste(DIR$'Base', "TBGB\\",this_run,"\\",sep="")
}
this_out <- c(paste("TestsSCA",c(1:5), sep="")); plotDescrip <-"SCA"
nlayers<-6
if(TBGB==TRUE){
groupsDF<-read.csv(paste(this_path,"\\TBGB_Groups.csv",sep="")); ng<-dim(groupsDF)[1]
} else{
groupsDF<-read.csv(paste(this_path,"..\\CRAM_groups.csv",sep="")); ng<-dim(groupsDF)[1]
}
# thisB0df<-read.csv(paste(this_path,"..\\CRAM_B0.csv",sep=""))
plotPath<-paste(this_path,"..\\Figures\\", plotDescrip,sep="")
showall<-TRUE
nruns<-length(this_out)
burnin<-rep(1,nruns) #number of years to skip in plot
# burnin<-c(36,1)
runCols<-c( colorRampPalette(colors=c("midnightblue",myBlue,myAqua,myGold, myOrange, "red"))(nruns))
# runCols<-c(colorRampPalette(colors=c("midnightblue",myBlue,myAqua,myGreen))(nruns-1), "red")
#
# runCols <- c(colorRampPalette(colors=c(myBlue,myAqua,myGreen))(4), "black", colorRampPalette(colors=c(myYellow, myOrange,"red"))(4))
# # runCols <- c( "black",colorRampPalette(colors=c(myBlue,myAqua,myGreen))(4))
daysTimeStep<-73
numStepsPerYear<-365/daysTimeStep
year0<-1865
fishingStartYear<-1865
modelStartYear<-1865
if(TBGB==TRUE){
year0<-1899
fishingStartYear<-1899
modelStartYear<-1899
}
nc_list<-NULL; nts_list<-NULL; min_nts<-1e+12
for(r in 1:nruns){
outPath<-paste(this_path,"output",this_out[r],"\\",sep="")
# if(TBGB==TRUE){
# thisRun<-thisRuns[r]
# nc_list[[r]]<-nc_open(paste(outPath,thisRun,".nc",sep=""))
#
# } else{
nc_list[[r]]<-nc_open(paste(outPath,"output.nc",sep=""))
# }
thisVol<-ncvar_get(nc_list[[r]],"volume")
thisDz<-ncvar_get(nc_list[[r]],"dz")
nts_list[[r]]<-dim(thisVol)[3]-burnin[r] #number of timesteps
if(showall==TRUE){nts_list[[r]]<-dim(thisVol)[3]}
if(nts_list[[r]]<min_nts){min_nts<-nts_list[[r]]}
}
nts_list
max_nts<-max(nts_list, na.rm=TRUE)
## Build the per-run time axis in calendar years and track the overall
## x-range across runs (timeMin/timeMax) for shared plot limits.
timeList<-NULL; timeMin <- 30000; timeMax <- 0
for(r in 1:nruns){
  this_nts<-nts_list[[r]]; this_burnin <- burnin[r]
  ## BUG FIX: use the scenario's start year (year0 is set to 1899 when
  ## TBGB==TRUE above, 1865 otherwise) instead of the hard-coded 1865 the
  ## original used, which mis-dated the TBGB time axis by 34 years.
  thisYear0<-year0 - this_burnin + 1
  thisSeq <- seq(1, (this_nts-this_burnin +1)*daysTimeStep, by=daysTimeStep)/365
  this_time <-thisYear0 + thisSeq
  timeList[[r]]<-this_time
  if(max(this_time) > timeMax){timeMax<-max(this_time)}
  if(min(this_time) < timeMin){timeMin <- min(this_time)}
}
xLabsTemp<-seq(0,(max_nts*daysTimeStep),by=365)/365
xLabsAt<-xLabsTemp*numStepsPerYear
xLabs<-xLabsTemp+year0+burnin[1]
#get all tracer names
allTracers<-sort(names(nc_list[[r]]$var))
temp<-allTracers[grep("_N",allTracers)]; tracers2plot<-temp[grep("Nums",temp,invert = TRUE)];
# tracers2plot<-c(tracers2plot,"Oxygen","Temp","Si", "NO3")
ntracers<-length(tracers2plot)
dynBoxes<-2:24
# dynBoxes<-2:3
storeTracers<-array(NA, dim=c(nruns, length(tracers2plot), max(nts_list)+1))
plotsFile<-paste(plotPath,"ALL_N.pdf",sep="")
pdf(plotsFile)
par(mfrow=c(4,1),mar=c(3,4,2,0),oma=c(1,0,0,0))
for(t in 1:ntracers){
thisTracer<-tracers2plot[t]
temp<-ncvar_get(nc_list[[1]],thisTracer)
thisVol<-ncvar_get(nc_list[[1]],"volume")
if(length(dim(temp))==3){
yy<-apply(temp[,dynBoxes,]*thisVol[,dynBoxes,],3,sum) * mg_2_tonne * X_CN
} else{
yy<-apply(temp[dynBoxes,]*thisVol[nlayers,dynBoxes,],2,sum) * mg_2_tonne * X_CN
}
xx<-yy[burnin[1]:length(yy)]
if(showall==TRUE){
xx <- yy
}
# storeTracers[1, t, burnin[r]:length(yy)]<- xx
# storeTracers[1, t, ]<- xx
thisymax<-max(xx)*1.1
thisymin<-min(0,min(xx)*1.1)
if(showall==TRUE){
plot(x=timeList[[1]], y=xx,type="l",col=runCols[1],lwd=2,ylim=c(thisymin,thisymax*1.5),ylab="Biomass (tonnes)",xlab="Day", xlim=c(timeMin, timeMax))
mtext(thisTracer,side=3,adj=0,font=2)
for(r in 2:nruns){
temp<-ncvar_get(nc_list[[r]],thisTracer)
thisVol<-ncvar_get(nc_list[[r]],"volume")
if(length(dim(temp))==3){
yy<-apply(temp[,dynBoxes,]*thisVol[,dynBoxes,],3,sum) * mg_2_tonne * X_CN
} else{
yy<-apply(temp[dynBoxes,]*thisVol[nlayers,dynBoxes,],2,sum) * mg_2_tonne * X_CN
}
xx<-yy
points(x=timeList[[r]], y=xx,type="l",col=runCols[r],lwd=1.5,lty=r)
# storeTracers[r, t, burnin[r]:length(yy)]<- xx
# storeTracers[r, t, ]<- xx
# legend(legend=this_out,col=runCols,lty=seq(1,nruns),x="bottomleft")
}
} else{
plot(xx,type="l",col=runCols[1],lwd=2,ylim=c(thisymin,thisymax*1.5),ylab="Biomass (tonnes)",xlab="Day",xaxt="n")
mtext(thisTracer,side=3,adj=0,font=2)
# abline(h=1,col="red",lty=2,lwd=1.5)
axis(at=xLabsAt,labels=xLabs,side=1)
for(r in 2:nruns){
temp<-ncvar_get(nc_list[[r]],thisTracer)
thisVol<-ncvar_get(nc_list[[r]],"volume")
if(length(dim(temp))==3){
yy<-apply(temp[,dynBoxes,]*thisVol[,dynBoxes,],3,sum) * mg_2_tonne * X_CN
} else{
yy<-apply(temp[dynBoxes,]*thisVol[nlayers,dynBoxes,],2,sum) * mg_2_tonne * X_CN
}
xx<-yy[burnin[r]:length(yy)]
points(xx,type="l",col=runCols[r],lwd=1.5,lty=r)
# storeTracers[r, t, burnin[r]:length(yy)]<- xx
# storeTracers[r, t, ]<- xx
# legend(legend=this_out,col=runCols,lty=seq(1,nruns),x="bottomleft")
}
}
}
dev.off()
# pdf(paste(plotPath,"_LEGEND.pdf", sep=""), height=7, width=5)
makeBlankPlot()
legend(legend=this_out,col=runCols,lty=seq(1,nruns),x="center", seg.len=3, lwd=3)
# dev.off()
#
# ## do the high keystoneness ones on their own - add MB and BO too, as they are spectacularly variable so far!
# ks_codes <-c("HOK", "SPD", "PFS", "ORH", "BIS", "SB", "PFM", "CET", "HAK", "LIN", "SND", "MJE");
# nks<-length(ks_codes)
# for(k in 1:nks){
# thisCode <- ks_codes[k]
# thisName<-str_trim(groupsDF$Name[groupsDF$Code==thisCode])
# thisTracer<-paste(thisName,"_N", sep="")
# temp<-ncvar_get(nc_list[[1]],thisTracer)
# thisVol<-ncvar_get(nc_list[[1]],"volume")
# if(length(dim(temp))==3){
# yy<-apply(temp[,dynBoxes,]*thisVol[,dynBoxes,],3,sum) * mg_2_tonne * X_CN
# } else{
# yy<-apply(temp[dynBoxes,]*thisVol[nlayers,dynBoxes,],2,sum) * mg_2_tonne * X_CN
# }
# xx <- yy
# thisymax<-max(xx)*1.1
# thisymin<-min(0,min(xx)*1.1)
# thisPlotFile<-paste(plotPath, "fullBiomassTracers",thisCode,sep="")
# jpeg(paste(thisPlotFile,".jpg", sep=""), quality=3000)
# plot(x=timeList[[1]], y=xx,type="l",col=runCols[1],lwd=2,ylim=c(thisymin,thisymax*1.5),ylab="Biomass (tonnes)",xlab="Day", xlim=c(timeMin, timeMax))
# mtext(thisTracer,side=3,adj=0,font=2)
# for(r in 2:nruns){
# temp<-ncvar_get(nc_list[[r]],thisTracer)
# thisVol<-ncvar_get(nc_list[[r]],"volume")
# if(length(dim(temp))==3){
# yy<-apply(temp[,dynBoxes,]*thisVol[,dynBoxes,],3,sum) * mg_2_tonne * X_CN
# } else{
# yy<-apply(temp[dynBoxes,]*thisVol[nlayers,dynBoxes,],2,sum) * mg_2_tonne * X_CN
# }
# xx<-yy
# points(x=timeList[[r]], y=xx,type="l",col=runCols[r],lwd=1.5,lty=r)
# }
# dev.off()
# }
#
#
| /(2)Diagnostic_plots/(01b)plotNTracers_compareRuns.R | no_license | mcgregorv/AtlantisRscripts | R | false | false | 7,497 | r | #plot all tracers for a given box and layer
this_run<-"SS3"
# this_run<-"ClimateChange"
this_run<-"TBGB_JP2"
TBGB=FALSE
TBGB=TRUE
this_path<-paste(DIR$'Base',"ATLANTISmodels\\",this_run,"\\",sep="")
if(TBGB==TRUE){
this_path = paste(DIR$'Base', "TBGB\\",this_run,"\\",sep="")
}
this_out <- c(paste("TestsSCA",c(1:5), sep="")); plotDescrip <-"SCA"
nlayers<-6
if(TBGB==TRUE){
groupsDF<-read.csv(paste(this_path,"\\TBGB_Groups.csv",sep="")); ng<-dim(groupsDF)[1]
} else{
groupsDF<-read.csv(paste(this_path,"..\\CRAM_groups.csv",sep="")); ng<-dim(groupsDF)[1]
}
# thisB0df<-read.csv(paste(this_path,"..\\CRAM_B0.csv",sep=""))
plotPath<-paste(this_path,"..\\Figures\\", plotDescrip,sep="")
showall<-TRUE
nruns<-length(this_out)
burnin<-rep(1,nruns) #number of years to skip in plot
# burnin<-c(36,1)
runCols<-c( colorRampPalette(colors=c("midnightblue",myBlue,myAqua,myGold, myOrange, "red"))(nruns))
# runCols<-c(colorRampPalette(colors=c("midnightblue",myBlue,myAqua,myGreen))(nruns-1), "red")
#
# runCols <- c(colorRampPalette(colors=c(myBlue,myAqua,myGreen))(4), "black", colorRampPalette(colors=c(myYellow, myOrange,"red"))(4))
# # runCols <- c( "black",colorRampPalette(colors=c(myBlue,myAqua,myGreen))(4))
daysTimeStep<-73
numStepsPerYear<-365/daysTimeStep
year0<-1865
fishingStartYear<-1865
modelStartYear<-1865
if(TBGB==TRUE){
year0<-1899
fishingStartYear<-1899
modelStartYear<-1899
}
nc_list<-NULL; nts_list<-NULL; min_nts<-1e+12
for(r in 1:nruns){
outPath<-paste(this_path,"output",this_out[r],"\\",sep="")
# if(TBGB==TRUE){
# thisRun<-thisRuns[r]
# nc_list[[r]]<-nc_open(paste(outPath,thisRun,".nc",sep=""))
#
# } else{
nc_list[[r]]<-nc_open(paste(outPath,"output.nc",sep=""))
# }
thisVol<-ncvar_get(nc_list[[r]],"volume")
thisDz<-ncvar_get(nc_list[[r]],"dz")
nts_list[[r]]<-dim(thisVol)[3]-burnin[r] #number of timesteps
if(showall==TRUE){nts_list[[r]]<-dim(thisVol)[3]}
if(nts_list[[r]]<min_nts){min_nts<-nts_list[[r]]}
}
nts_list
max_nts<-max(nts_list, na.rm=TRUE)
timeList<-NULL; timeMin <- 30000; timeMax <- 0
for(r in 1:nruns){
this_nts<-nts_list[[r]]; this_burnin <- burnin[r]
thisYear0<-1865 - this_burnin + 1
thisSeq <- seq(1, (this_nts-this_burnin +1)*daysTimeStep, by=daysTimeStep)/365
this_time <-thisYear0 + thisSeq
timeList[[r]]<-this_time
if(max(this_time) > timeMax){timeMax<-max(this_time)}
if(min(this_time) < timeMin){timeMin <- min(this_time)}
}
xLabsTemp<-seq(0,(max_nts*daysTimeStep),by=365)/365
xLabsAt<-xLabsTemp*numStepsPerYear
xLabs<-xLabsTemp+year0+burnin[1]
#get all tracer names
allTracers<-sort(names(nc_list[[r]]$var))
temp<-allTracers[grep("_N",allTracers)]; tracers2plot<-temp[grep("Nums",temp,invert = TRUE)];
# tracers2plot<-c(tracers2plot,"Oxygen","Temp","Si", "NO3")
ntracers<-length(tracers2plot)
dynBoxes<-2:24
# dynBoxes<-2:3
storeTracers<-array(NA, dim=c(nruns, length(tracers2plot), max(nts_list)+1))
plotsFile<-paste(plotPath,"ALL_N.pdf",sep="")
pdf(plotsFile)
par(mfrow=c(4,1),mar=c(3,4,2,0),oma=c(1,0,0,0))
for(t in 1:ntracers){
thisTracer<-tracers2plot[t]
temp<-ncvar_get(nc_list[[1]],thisTracer)
thisVol<-ncvar_get(nc_list[[1]],"volume")
if(length(dim(temp))==3){
yy<-apply(temp[,dynBoxes,]*thisVol[,dynBoxes,],3,sum) * mg_2_tonne * X_CN
} else{
yy<-apply(temp[dynBoxes,]*thisVol[nlayers,dynBoxes,],2,sum) * mg_2_tonne * X_CN
}
xx<-yy[burnin[1]:length(yy)]
if(showall==TRUE){
xx <- yy
}
# storeTracers[1, t, burnin[r]:length(yy)]<- xx
# storeTracers[1, t, ]<- xx
thisymax<-max(xx)*1.1
thisymin<-min(0,min(xx)*1.1)
if(showall==TRUE){
plot(x=timeList[[1]], y=xx,type="l",col=runCols[1],lwd=2,ylim=c(thisymin,thisymax*1.5),ylab="Biomass (tonnes)",xlab="Day", xlim=c(timeMin, timeMax))
mtext(thisTracer,side=3,adj=0,font=2)
for(r in 2:nruns){
temp<-ncvar_get(nc_list[[r]],thisTracer)
thisVol<-ncvar_get(nc_list[[r]],"volume")
if(length(dim(temp))==3){
yy<-apply(temp[,dynBoxes,]*thisVol[,dynBoxes,],3,sum) * mg_2_tonne * X_CN
} else{
yy<-apply(temp[dynBoxes,]*thisVol[nlayers,dynBoxes,],2,sum) * mg_2_tonne * X_CN
}
xx<-yy
points(x=timeList[[r]], y=xx,type="l",col=runCols[r],lwd=1.5,lty=r)
# storeTracers[r, t, burnin[r]:length(yy)]<- xx
# storeTracers[r, t, ]<- xx
# legend(legend=this_out,col=runCols,lty=seq(1,nruns),x="bottomleft")
}
} else{
plot(xx,type="l",col=runCols[1],lwd=2,ylim=c(thisymin,thisymax*1.5),ylab="Biomass (tonnes)",xlab="Day",xaxt="n")
mtext(thisTracer,side=3,adj=0,font=2)
# abline(h=1,col="red",lty=2,lwd=1.5)
axis(at=xLabsAt,labels=xLabs,side=1)
for(r in 2:nruns){
temp<-ncvar_get(nc_list[[r]],thisTracer)
thisVol<-ncvar_get(nc_list[[r]],"volume")
if(length(dim(temp))==3){
yy<-apply(temp[,dynBoxes,]*thisVol[,dynBoxes,],3,sum) * mg_2_tonne * X_CN
} else{
yy<-apply(temp[dynBoxes,]*thisVol[nlayers,dynBoxes,],2,sum) * mg_2_tonne * X_CN
}
xx<-yy[burnin[r]:length(yy)]
points(xx,type="l",col=runCols[r],lwd=1.5,lty=r)
# storeTracers[r, t, burnin[r]:length(yy)]<- xx
# storeTracers[r, t, ]<- xx
# legend(legend=this_out,col=runCols,lty=seq(1,nruns),x="bottomleft")
}
}
}
dev.off()
# pdf(paste(plotPath,"_LEGEND.pdf", sep=""), height=7, width=5)
makeBlankPlot()
legend(legend=this_out,col=runCols,lty=seq(1,nruns),x="center", seg.len=3, lwd=3)
# dev.off()
#
# ## do the high keystoneness ones on their own - add MB and BO too, as they are spectacularly variable so far!
# ks_codes <-c("HOK", "SPD", "PFS", "ORH", "BIS", "SB", "PFM", "CET", "HAK", "LIN", "SND", "MJE");
# nks<-length(ks_codes)
# for(k in 1:nks){
# thisCode <- ks_codes[k]
# thisName<-str_trim(groupsDF$Name[groupsDF$Code==thisCode])
# thisTracer<-paste(thisName,"_N", sep="")
# temp<-ncvar_get(nc_list[[1]],thisTracer)
# thisVol<-ncvar_get(nc_list[[1]],"volume")
# if(length(dim(temp))==3){
# yy<-apply(temp[,dynBoxes,]*thisVol[,dynBoxes,],3,sum) * mg_2_tonne * X_CN
# } else{
# yy<-apply(temp[dynBoxes,]*thisVol[nlayers,dynBoxes,],2,sum) * mg_2_tonne * X_CN
# }
# xx <- yy
# thisymax<-max(xx)*1.1
# thisymin<-min(0,min(xx)*1.1)
# thisPlotFile<-paste(plotPath, "fullBiomassTracers",thisCode,sep="")
# jpeg(paste(thisPlotFile,".jpg", sep=""), quality=3000)
# plot(x=timeList[[1]], y=xx,type="l",col=runCols[1],lwd=2,ylim=c(thisymin,thisymax*1.5),ylab="Biomass (tonnes)",xlab="Day", xlim=c(timeMin, timeMax))
# mtext(thisTracer,side=3,adj=0,font=2)
# for(r in 2:nruns){
# temp<-ncvar_get(nc_list[[r]],thisTracer)
# thisVol<-ncvar_get(nc_list[[r]],"volume")
# if(length(dim(temp))==3){
# yy<-apply(temp[,dynBoxes,]*thisVol[,dynBoxes,],3,sum) * mg_2_tonne * X_CN
# } else{
# yy<-apply(temp[dynBoxes,]*thisVol[nlayers,dynBoxes,],2,sum) * mg_2_tonne * X_CN
# }
# xx<-yy
# points(x=timeList[[r]], y=xx,type="l",col=runCols[r],lwd=1.5,lty=r)
# }
# dev.off()
# }
#
#
|
# salaryBAR: compare two salary samples (Vermont vs Connecticut) with
# side-by-side boxplots on the raw scale and on a log10 scale.
# Reads salary data from fixed files under c:\StatBook (paths are hard-coded;
# presumably dollars -- TODO confirm units) and is called purely for its
# plotting side effects; it returns nothing useful.
salaryBAR <-
function()
{
# Write this function's own definition back to disk (dump() serializes it).
dump("salaryBAR","c:\\StatBook\\salaryBAR.r")
# Read Vermont salaries as text, convert to $1000 units, drop non-numeric
# entries (as.numeric coerces non-numbers to NA), and record the count.
sv=scan("c:\\StatBook\\Vermont.txt",what="")
sv=as.numeric(sv)/1000;sv=sv[!is.na(sv)];nv=length(sv)
# Same preprocessing for Connecticut.
sc=scan("c:\\StatBook\\Connecticut.txt",what="")
sc=as.numeric(sc)/1000;sc=sc[!is.na(sc)];nc=length(sc)
# Two panels side by side with tight margins.
par(mfrow=c(1,2),mar=c(2,4,2,.1))
boxplot(list(sv,sc),names=c("Vermont","Connecticut"),ylab="Salary, $1000")
title("Standard boxplot")
# Log-scale panel: boxplot of log10 salaries, with the default y-axis
# suppressed so a custom axis can show the original (unlogged) values.
boxplot(list(log10(sv),log10(sc)),xlim=c(0.5,2.5),names=c("Vermont","Connecticut"),yaxt="n",ylim=log10(c(20,250)),ylab="Salary, $1000")
title("log10 scale boxplot")
# Hand-built y-axis: tick positions at log10 of the chosen salary values,
# labelled with the raw values.
ysal=c(20,30,50,100,150,250)
axis(side=2,log10(ysal),labels=as.character(ysal),srt=90)
# Overlay every observation as a short horizontal tick beside its box
# (Vermont around x=1, Connecticut around x=2).
segments(rep(.9,nv),log10(sv),rep(1.1,nv),log10(sv))
segments(rep(1.9,nc),log10(sc),rep(2.1,nc),log10(sc))
}
| /RcodeData/salaryBAR.r | no_license | PepSalehi/advancedstatistics | R | false | false | 792 | r | salaryBAR <-
function()
{
dump("salaryBAR","c:\\StatBook\\salaryBAR.r")
sv=scan("c:\\StatBook\\Vermont.txt",what="")
sv=as.numeric(sv)/1000;sv=sv[!is.na(sv)];nv=length(sv)
sc=scan("c:\\StatBook\\Connecticut.txt",what="")
sc=as.numeric(sc)/1000;sc=sc[!is.na(sc)];nc=length(sc)
par(mfrow=c(1,2),mar=c(2,4,2,.1))
boxplot(list(sv,sc),names=c("Vermont","Connecticut"),ylab="Salary, $1000")
title("Standard boxplot")
boxplot(list(log10(sv),log10(sc)),xlim=c(0.5,2.5),names=c("Vermont","Connecticut"),yaxt="n",ylim=log10(c(20,250)),ylab="Salary, $1000")
title("log10 scale boxplot")
ysal=c(20,30,50,100,150,250)
axis(side=2,log10(ysal),labels=as.character(ysal),srt=90)
segments(rep(.9,nv),log10(sv),rep(1.1,nv),log10(sv))
segments(rep(1.9,nc),log10(sc),rep(2.1,nc),log10(sc))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rebird-package.R
\docType{package}
\name{rebird-package}
\alias{rebird}
\alias{rebird-package}
\title{rebird: R Client for the eBird Database of Bird Observations}
\description{
A programmatic client for the eBird database, including functions
for searching for bird observations by geographic location (latitude,
longitude), eBird hotspots, location identifiers, by notable sightings, by
region, and by taxonomic name.
}
\seealso{
Useful links:
\itemize{
\item \url{https://docs.ropensci.org/rebird}
\item \url{http://github.com/ropensci/rebird}
\item Report bugs at \url{http://github.com/ropensci/rebird/issues}
}
}
\author{
\strong{Maintainer}: Sebastian Pardo \email{sebpardo@gmail.com} (0000-0002-4147-5796)
Authors:
\itemize{
\item Rafael Maia \email{rm72@zips.uakron.edu}
\item Scott Chamberlain \email{myrmecocystus@gmail.com} (0000-0003-1444-9135)
\item Andy Teucher \email{andy.teucher@gmail.com}
}
Other contributors:
\itemize{
\item Guy Babineau \email{guy.babineau@gmail.com} [contributor]
}
}
\keyword{internal}
| /man/rebird-package.Rd | permissive | VLucet/rebird | R | false | true | 1,135 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rebird-package.R
\docType{package}
\name{rebird-package}
\alias{rebird}
\alias{rebird-package}
\title{rebird: R Client for the eBird Database of Bird Observations}
\description{
A programmatic client for the eBird database, including functions
for searching for bird observations by geographic location (latitude,
longitude), eBird hotspots, location identifiers, by notable sightings, by
region, and by taxonomic name.
}
\seealso{
Useful links:
\itemize{
\item \url{https://docs.ropensci.org/rebird}
\item \url{http://github.com/ropensci/rebird}
\item Report bugs at \url{http://github.com/ropensci/rebird/issues}
}
}
\author{
\strong{Maintainer}: Sebastian Pardo \email{sebpardo@gmail.com} (0000-0002-4147-5796)
Authors:
\itemize{
\item Rafael Maia \email{rm72@zips.uakron.edu}
\item Scott Chamberlain \email{myrmecocystus@gmail.com} (0000-0003-1444-9135)
\item Andy Teucher \email{andy.teucher@gmail.com}
}
Other contributors:
\itemize{
\item Guy Babineau \email{guy.babineau@gmail.com} [contributor]
}
}
\keyword{internal}
|
# PURPOSE: Create Figure 3 in the manuscript, displaying the variable importance
#          plots over the entire AdaPT search for the \pi_1 model and the
#          corresponding change in partial dependence plot. Still include a module
#          specific enrichment plot.
# Author: Ron Yurko
# Load necessary packages:
library(tidyverse)
library(data.table)
library(cowplot)
# ggrepel is required for geom_label_repel() used below; it is not attached
# by tidyverse or cowplot, so the script previously failed at that call.
library(ggrepel)
library(latex2exp)
library(xgboost)
library(pdp)
# ------------------------------------------------------------------------------
# Load the SCZ BrainVar data:
bip_scz_brainvar_data <- read_csv("data/bip_schz_data/bip_scz_data_14_18_brainvar_eqtls_hcp_wgcna.csv")
# Load the final model results:
scz_with_bd_z_eqtl_slopes_wgcna <-
readRDS("data/bip_schz_data/brainvar_results/cv_tune_results/scz_with_bd_z_eqtl_slopes_wgcna_s05_2cv.rds")
# First create a display of the change in variable importance over the AdaPT search
# Start with the pi_1 models (odd numbers) - stacking the importance matrices
# together for each of the models:
pi_model_i <- seq(1, length(scz_with_bd_z_eqtl_slopes_wgcna$model_fit), by = 2)
pi_search_importance_data <- map_dfr(1:length(pi_model_i),
function(step_i) {
pi_model_step_i <- pi_model_i[step_i]
xgb.importance(model = scz_with_bd_z_eqtl_slopes_wgcna$model_fit[[pi_model_step_i]]) %>%
as.data.frame() %>%
mutate(adapt_step = step_i)
})
# Will highlight the top variables from the final model:
final_top_pi_importance <- pi_search_importance_data %>%
filter(adapt_step == length(pi_model_i)) %>%
arrange(desc(Gain)) %>%
dplyr::slice(1:8)
# Now create a display of the importance over the search with the top variable
# highlighted:
pi_importance_search_plot <- pi_search_importance_data %>%
filter(Feature %in% final_top_pi_importance$Feature) %>%
mutate(feature_label = str_replace_all(Feature, "_", " ") %>%
str_replace_all("ave abs ", "Average |") %>%
str_replace_all("beta", "beta|") %>%
str_replace_all("brainvar any gene", "WGCNA module:") %>%
str_replace_all("grey", "gray") %>%
str_replace_all("z bip 14", "BD z-statistics")) %>%
ggplot() +
geom_line(data = {(
pi_search_importance_data %>%
filter(!(Feature %in% final_top_pi_importance$Feature))
)}, aes(x = adapt_step, y = Gain,
group = Feature), color = "gray90", alpha = 0.5, size = 1) +
geom_line(aes(x = adapt_step, y = Gain,
color = feature_label,
group = feature_label,
linetype = feature_label),
size = 1.5, alpha = 0.8) +
geom_label_repel(data = {(
pi_search_importance_data %>%
filter(Feature %in% final_top_pi_importance$Feature) %>%
mutate(feature_label = str_replace_all(Feature, "_", " ") %>%
str_replace_all("ave abs ", "Average |") %>%
str_replace_all("beta", "beta|") %>%
str_replace_all("brainvar any gene", "WGCNA module:") %>%
str_replace_all("grey", "gray") %>%
str_replace_all("z bip 14", "BD z-statistics")) %>%
filter(adapt_step == length(pi_model_i))
)}, aes(x = adapt_step, y = Gain, label = feature_label,
color = feature_label),
direction = "y", nudge_x = .75, segment.size = 0.05,
hjust = 0) +
#scale_color_brewer(palette = "Set1") +
scale_color_manual(values = c(#"#E41A1C", "cyan", "#4DAF4A",
rep("goldenrod4", 3),
"darkblue",
"black", "brown",
"gray50", "salmon1")) +
scale_linetype_manual(guide = FALSE,
values = c("dotdash", "dotted",
"longdash", #rep("longdash", 3),
rep("solid", 5))) +
scale_x_continuous(limits = c(1, 25),
breaks = seq(1, length(pi_model_i),
by = 1)) +
theme_cowplot() +
labs(x = "AdaPT model fitting iteration",
y = "Importance",
color = "Variable",
title = TeX('Change in variable importance across $\\pi_1$ models in AdaPT search with top variables in final model highlighted')) +
theme(legend.position = "none")
# Next create the change in the partial dependence plot over the course of the
# search with AdaPT for the \pi_1 model - will do so at quantiles:
pi_quantiles_search_pdp_data <- map_dfr(1:length(pi_model_i),
function(step_i) {
pi_model_step_i <- pi_model_i[step_i]
pdp::partial(scz_with_bd_z_eqtl_slopes_wgcna$model_fit[[pi_model_step_i]],
pred.var = "z_bip_14",
ice = FALSE,
prob = TRUE,
center = FALSE,
plot = FALSE,
quantiles = TRUE,
probs = seq(0, 1, by = .025),
train = data.matrix(
bip_scz_brainvar_data[,scz_with_bd_z_eqtl_slopes_wgcna$model_fit[[1]]$feature_names])) %>%
as.data.frame() %>%
mutate(adapt_step = step_i)
})
pi_quantiles_pdp_search_plot <- pi_quantiles_search_pdp_data %>%
ggplot() +
geom_line(aes(x = z_bip_14, y = yhat,
color = adapt_step,
group = as.factor(adapt_step))) +
scale_y_continuous(limits = c(0, 1)) +
theme_cowplot() +
scale_color_gradient(low = "darkblue", high = "darkorange") +
geom_rug(data = bip_scz_brainvar_data,
aes(x = z_bip_14), y = rep(1, nrow(bip_scz_brainvar_data)),
sides = "b",
color = "black", alpha = 0.15) +
geom_vline(xintercept = 1.96, linetype = "dashed", color = "darkred") +
geom_vline(xintercept = -1.96, linetype = "dashed", color = "darkred") +
guides(color = guide_colourbar(barwidth = 10, barheight = .5)) +
labs(x = "BD z-statistic",
y = TeX('$\\pi_1$'),
color = "AdaPT model fitting iteration",
subtitle = "Dashed red lines indicate z-statistics equal to +/- 1.96",
title = TeX('Change in partial dependence for $\\pi_1$ and BD z-statistics')) +
theme(legend.position = "bottom",
legend.title = element_text(size = 10),
legend.text = element_text(size = 10),
plot.title = element_text(size = 12),
plot.subtitle = element_text(size = 10),
axis.title = element_text(size = 10),
axis.text = element_text(size = 8))
# Finally, create a plot revealing the enrichment for the salmon module for SCZ
scz_brainvar_salmon_plot <- bip_scz_brainvar_data %>%
ggplot(aes(x = scz_14_P,
fill = as.factor(brainvar_any_gene_salmon),
color = as.factor(brainvar_any_gene_salmon))) +
geom_histogram(breaks = seq(0, 1, by = 0.05), alpha = 0.5) +
scale_fill_manual(values = c("darkblue", "darkorange"),
labels = c("No", "Yes")) +
scale_color_manual(values = c("darkblue", "darkorange"), guide = FALSE) +
theme_bw() +
facet_wrap(~as.factor(brainvar_any_gene_salmon), ncol = 1,
scales = "free_y") +
theme(strip.text = element_blank(),
plot.title = element_text(size = 12),
strip.background = element_blank(),
legend.position = c(0.5, 0.3),
legend.direction = "vertical",
legend.background = element_blank()) +
labs(x = "SCZ p-values", y = "Count",
title = "Distribution of SCZ p-values by salmon module assignment",
fill = "Any cis-eQTL gene in salmon module?")
# Now create the grid of plots for figure 4 in the paper:
scz_brainvar_variables_f3 <-
plot_grid(pi_importance_search_plot,
plot_grid(pi_quantiles_pdp_search_plot,
scz_brainvar_salmon_plot,
ncol = 2, labels = c("B", "C"),
label_fontface = "plain", rel_widths = c(1, 1),
rel_heights = c(1, 1)),
labels = c("A", ""), label_fontface = "plain",
ncol = 1)
# Save
save_plot("figures/f3_scz_variable_plots.pdf",
scz_brainvar_variables_f3, ncol = 2, nrow = 2,
base_width = 6, base_height = 4)
save_plot("nonpdf_figures/f3_scz_variable_plots.jpg",
scz_brainvar_variables_f3, ncol = 2, nrow = 2,
base_width = 6, base_height = 4)
| /R/scz/create_f3_brainvar_variable_plots.R | no_license | SitaZhou/AdaPT-GWAS-manuscript-code | R | false | false | 8,909 | r | # PURPOSE: Create Figure 3 in the manuscript, displaying the variable importance
# plots over the entire AdaPT search for the \pi_1 model and the
# corresponding change in partial dependence plot. Still include a module
# specific enrichment plot.
# Author: Ron Yurko
# Load necessary packages:
library(tidyverse)
library(data.table)
library(cowplot)
library(latex2exp)
library(xgboost)
library(pdp)
# ------------------------------------------------------------------------------
# Load the SCZ BrainVar data:
bip_scz_brainvar_data <- read_csv("data/bip_schz_data/bip_scz_data_14_18_brainvar_eqtls_hcp_wgcna.csv")
# Load the final model results:
scz_with_bd_z_eqtl_slopes_wgcna <-
readRDS("data/bip_schz_data/brainvar_results/cv_tune_results/scz_with_bd_z_eqtl_slopes_wgcna_s05_2cv.rds")
# First create a display of the change in variable importance over the AdaPT search
# Start with the pi_1 models (odd numbers) - stacking the importance matrices
# together for each of the models:
pi_model_i <- seq(1, length(scz_with_bd_z_eqtl_slopes_wgcna$model_fit), by = 2)
pi_search_importance_data <- map_dfr(1:length(pi_model_i),
function(step_i) {
pi_model_step_i <- pi_model_i[step_i]
xgb.importance(model = scz_with_bd_z_eqtl_slopes_wgcna$model_fit[[pi_model_step_i]]) %>%
as.data.frame() %>%
mutate(adapt_step = step_i)
})
# Will highlight the top variables from the final model:
final_top_pi_importance <- pi_search_importance_data %>%
filter(adapt_step == length(pi_model_i)) %>%
arrange(desc(Gain)) %>%
dplyr::slice(1:8)
# Now create a display of the importance over the search with the top variable
# highlighted:
pi_importance_search_plot <- pi_search_importance_data %>%
filter(Feature %in% final_top_pi_importance$Feature) %>%
mutate(feature_label = str_replace_all(Feature, "_", " ") %>%
str_replace_all("ave abs ", "Average |") %>%
str_replace_all("beta", "beta|") %>%
str_replace_all("brainvar any gene", "WGCNA module:") %>%
str_replace_all("grey", "gray") %>%
str_replace_all("z bip 14", "BD z-statistics")) %>%
ggplot() +
geom_line(data = {(
pi_search_importance_data %>%
filter(!(Feature %in% final_top_pi_importance$Feature))
)}, aes(x = adapt_step, y = Gain,
group = Feature), color = "gray90", alpha = 0.5, size = 1) +
geom_line(aes(x = adapt_step, y = Gain,
color = feature_label,
group = feature_label,
linetype = feature_label),
size = 1.5, alpha = 0.8) +
geom_label_repel(data = {(
pi_search_importance_data %>%
filter(Feature %in% final_top_pi_importance$Feature) %>%
mutate(feature_label = str_replace_all(Feature, "_", " ") %>%
str_replace_all("ave abs ", "Average |") %>%
str_replace_all("beta", "beta|") %>%
str_replace_all("brainvar any gene", "WGCNA module:") %>%
str_replace_all("grey", "gray") %>%
str_replace_all("z bip 14", "BD z-statistics")) %>%
filter(adapt_step == length(pi_model_i))
)}, aes(x = adapt_step, y = Gain, label = feature_label,
color = feature_label),
direction = "y", nudge_x = .75, segment.size = 0.05,
hjust = 0) +
#scale_color_brewer(palette = "Set1") +
scale_color_manual(values = c(#"#E41A1C", "cyan", "#4DAF4A",
rep("goldenrod4", 3),
"darkblue",
"black", "brown",
"gray50", "salmon1")) +
scale_linetype_manual(guide = FALSE,
values = c("dotdash", "dotted",
"longdash", #rep("longdash", 3),
rep("solid", 5))) +
scale_x_continuous(limits = c(1, 25),
breaks = seq(1, length(pi_model_i),
by = 1)) +
theme_cowplot() +
labs(x = "AdaPT model fitting iteration",
y = "Importance",
color = "Variable",
title = TeX('Change in variable importance across $\\pi_1$ models in AdaPT search with top variables in final model highlighted')) +
theme(legend.position = "none")
# Next create the change in the partial dependence plot over the course of the
# search with AdaPT for the \pi_1 model - will do so at quantiles:
pi_quantiles_search_pdp_data <- map_dfr(1:length(pi_model_i),
function(step_i) {
pi_model_step_i <- pi_model_i[step_i]
pdp::partial(scz_with_bd_z_eqtl_slopes_wgcna$model_fit[[pi_model_step_i]],
pred.var = "z_bip_14",
ice = FALSE,
prob = TRUE,
center = FALSE,
plot = FALSE,
quantiles = TRUE,
probs = seq(0, 1, by = .025),
train = data.matrix(
bip_scz_brainvar_data[,scz_with_bd_z_eqtl_slopes_wgcna$model_fit[[1]]$feature_names])) %>%
as.data.frame() %>%
mutate(adapt_step = step_i)
})
pi_quantiles_pdp_search_plot <- pi_quantiles_search_pdp_data %>%
ggplot() +
geom_line(aes(x = z_bip_14, y = yhat,
color = adapt_step,
group = as.factor(adapt_step))) +
scale_y_continuous(limits = c(0, 1)) +
theme_cowplot() +
scale_color_gradient(low = "darkblue", high = "darkorange") +
geom_rug(data = bip_scz_brainvar_data,
aes(x = z_bip_14), y = rep(1, nrow(bip_scz_brainvar_data)),
sides = "b",
color = "black", alpha = 0.15) +
geom_vline(xintercept = 1.96, linetype = "dashed", color = "darkred") +
geom_vline(xintercept = -1.96, linetype = "dashed", color = "darkred") +
guides(color = guide_colourbar(barwidth = 10, barheight = .5)) +
labs(x = "BD z-statistic",
y = TeX('$\\pi_1$'),
color = "AdaPT model fitting iteration",
subtitle = "Dashed red lines indicate z-statistics equal to +/- 1.96",
title = TeX('Change in partial dependence for $\\pi_1$ and BD z-statistics')) +
theme(legend.position = "bottom",
legend.title = element_text(size = 10),
legend.text = element_text(size = 10),
plot.title = element_text(size = 12),
plot.subtitle = element_text(size = 10),
axis.title = element_text(size = 10),
axis.text = element_text(size = 8))
# Finally, create a plot revealing the enrichment for the salmon module for SCZ
scz_brainvar_salmon_plot <- bip_scz_brainvar_data %>%
ggplot(aes(x = scz_14_P,
fill = as.factor(brainvar_any_gene_salmon),
color = as.factor(brainvar_any_gene_salmon))) +
geom_histogram(breaks = seq(0, 1, by = 0.05), alpha = 0.5) +
scale_fill_manual(values = c("darkblue", "darkorange"),
labels = c("No", "Yes")) +
scale_color_manual(values = c("darkblue", "darkorange"), guide = FALSE) +
theme_bw() +
facet_wrap(~as.factor(brainvar_any_gene_salmon), ncol = 1,
scales = "free_y") +
theme(strip.text = element_blank(),
plot.title = element_text(size = 12),
strip.background = element_blank(),
legend.position = c(0.5, 0.3),
legend.direction = "vertical",
legend.background = element_blank()) +
labs(x = "SCZ p-values", y = "Count",
title = "Distribution of SCZ p-values by salmon module assignment",
fill = "Any cis-eQTL gene in salmon module?")
# Now create the grid of plots for figure 4 in the paper:
scz_brainvar_variables_f3 <-
plot_grid(pi_importance_search_plot,
plot_grid(pi_quantiles_pdp_search_plot,
scz_brainvar_salmon_plot,
ncol = 2, labels = c("B", "C"),
label_fontface = "plain", rel_widths = c(1, 1),
rel_heights = c(1, 1)),
labels = c("A", ""), label_fontface = "plain",
ncol = 1)
# Save
save_plot("figures/f3_scz_variable_plots.pdf",
scz_brainvar_variables_f3, ncol = 2, nrow = 2,
base_width = 6, base_height = 4)
save_plot("nonpdf_figures/f3_scz_variable_plots.jpg",
scz_brainvar_variables_f3, ncol = 2, nrow = 2,
base_width = 6, base_height = 4)
|
##CTT focuses on a decomposition of observed score variance
##IRT is going to focus on the probability of a correct response to a given item by a person of a specific ability
##Goal here is to generate some intuition into why this is a reasonable idea to consider.
#########################################################
##we are first going to read in an empirical dichotomously coded item response dataset
resp<-read.table("https://github.com/ben-domingue/252L/raw/master/data/emp-rasch.txt",header=FALSE)
##keep only complete cases (rows with no missing item responses)
resp[rowSums(is.na(resp))==0,]->resp
#########################################################
##what we want to do is look at the proportion of correct responses for different observed scores / sum scores
##the first step in this process wil be to organize data appropriately
##in particular, we want a matrix that has:
##items going from hardest to easiest
##persons going from least to most able
##we'll do the person-side sorting first
##we're going to just go through each observed sum score and collect the people
tmp<-list()
rowSums(resp)->rs
for (i in sort(unique(rs))) {
  resp[rs==i,]->tmp[[as.character(i)]]
} #so what is structure of tmp? (a list with one matrix of respondents per observed sum score)
do.call("rbind",tmp)->resp #this is a kind of tough command. see if you can make sense of it. i find working in this way with lists is super intuitive once you see the logic (let's talk if you don't!).
##we'll do the items a little more succinctly. we could have done something like this for the people.
colSums(resp)->cs
resp[,order(cs,decreasing=FALSE)]->resp
##just a quick double check that everything is monotonic in the ways we'd expect
##what do you expect to see? before running the next set of commands, draw a pictue for yourself.
par(mfrow=c(2,1))
plot(colMeans(resp),type="l")
plot(rowMeans(resp),type="l")
##pause at this point to check in with ben
#############################################################
##now we have most able examinees on the bottom and the hardest items on the left in 'resp'.
##aside: my entire dissertation was spent futzing about with implications that following from such orderings. https://link.springer.com/article/10.1007/s11336-013-9342-4
##let's condense this by collapsing rows so that all individuals with a common score are represented in a single row.
##a cell will now tell us the proportion of respondents in that row who responded correctly to a given item
rowSums(resp)->rs
tmp<-list()
sort(unique(rs))->rs.sorted
for (i in rs.sorted) {
  resp[rs==i,,drop=FALSE]->z
  colMeans(z)->tmp[[as.character(i)]]
}
do.call("rbind",tmp)->prop
rs.sorted->rownames(prop)
##note: it is this sort of list usage that i find very convenient. for each element of the list, we transformed a matrix (containing all respondents with a given sum score) into a single row vector (containing the proportion of correct responses to each item for the group of examinees with common sum score).
##that was handy!
#################################################################
##let's now look at the proportion of correct responsees as a function of sum score (for every row) for each item
##again, before running, what do you expect to see?
as.numeric(rownames(prop))->rs
5->i #first with just a single item
plot(rs,prop[,i],xlim=range(rs),ylim=0:1,xlab="sum score",ylab="% correct",type="l")
##Now all items. Loop over however many items the data actually contain
##(seq_len(ncol(prop))) instead of hard-coding 50, so this generalizes to
##other response matrices. NOTE(review): the 10x5 panel grid below still
##assumes roughly 50 items; adjust mfrow for datasets of a different size.
par(mfrow=c(10,5),mar=c(0,0,0,0))
for (i in seq_len(ncol(prop))) {
  plot(rs,prop[,i],xlim=range(rs),ylim=0:1,xlab="",ylab="",type="l",xaxt="n",yaxt="n")
}
##questions
##what are qualitative differences between curves
##why is curve smoothest generally in the middle?
##these kinds of non-parametric "ogives" are intimately related to what we're going to start talking about next week as "item characteristic curves" or "item response functions"
##the two big differences:
##1. we'll impose a parametric structure (although one doesn't necessarily have to, https://en.wikipedia.org/wiki/Mokken_scale)
##2. we won't observe an individual's location on the x-axis. this is the hard part!
| /cC/towards_irt.R | no_license | ben-domingue/252L | R | false | false | 4,039 | r | ##CTT focuses on a decomposition of observed score variance
## IRT is going to focus on the probability of a correct response to a given
## item by a person of a specific ability.
## Goal here is to generate some intuition into why that is a reasonable idea.
#########################################################
## Read in an empirical, dichotomously coded item response dataset and keep
## only the complete cases (rows with no missing responses).
resp <- read.table("https://github.com/ben-domingue/252L/raw/master/data/emp-rasch.txt", header = FALSE)
resp <- resp[rowSums(is.na(resp)) == 0, ]
#########################################################
## We want the proportion of correct responses for different observed
## scores / sum scores. First organize the data so the matrix has:
##   - items going from hardest to easiest (columns)
##   - persons going from least to most able (rows)
## Person-side sorting first: walk through each observed sum score and
## collect the people with that score.
tmp <- list()
rs <- rowSums(resp)
for (i in sort(unique(rs))) {
  tmp[[as.character(i)]] <- resp[rs == i, ]
}
## Stack the per-score pieces back together; rows are now ordered by sum score.
resp <- do.call("rbind", tmp)
## Items, a little more succinctly: order columns by total correct, ascending.
cs <- colSums(resp)
resp <- resp[, order(cs, decreasing = FALSE)]
## Quick double check that everything is monotonic in the ways we'd expect.
## What do you expect to see? Draw a picture for yourself before running these.
par(mfrow = c(2, 1))
plot(colMeans(resp), type = "l")
plot(rowMeans(resp), type = "l")
## pause at this point to check in with ben
#############################################################
## Now the most able examinees are on the bottom and the hardest items on the
## left in 'resp'.
## Aside: https://link.springer.com/article/10.1007/s11336-013-9342-4
## Condense by collapsing rows so that all individuals with a common score are
## represented in a single row; a cell then gives the proportion of
## respondents in that row who responded correctly to a given item.
rs <- rowSums(resp)
tmp <- list()
rs.sorted <- sort(unique(rs))
for (i in rs.sorted) {
  z <- resp[rs == i, , drop = FALSE]
  tmp[[as.character(i)]] <- colMeans(z)
}
prop <- do.call("rbind", tmp)
rownames(prop) <- rs.sorted
## For each list element we turned a matrix (all respondents with a given sum
## score) into a single row vector of proportions correct. Handy!
#################################################################
## Proportion of correct responses as a function of sum score, for each item.
## Again, before running: what do you expect to see?
rs <- as.numeric(rownames(prop))
i <- 5  # first with just a single item
plot(rs, prop[, i], xlim = range(rs), ylim = 0:1, xlab = "sum score", ylab = "% correct", type = "l")
## Now all items
par(mfrow = c(10, 5), mar = c(0, 0, 0, 0))
for (i in 1:50) {
  plot(rs, prop[, i], xlim = range(rs), ylim = 0:1, xlab = "", ylab = "", type = "l", xaxt = "n", yaxt = "n")
}
## Questions:
## - what are qualitative differences between curves?
## - why is the curve generally smoothest in the middle?
## These non-parametric "ogives" are intimately related to what we'll call
## "item characteristic curves" or "item response functions". Two differences:
## 1. we'll impose a parametric structure (though one doesn't necessarily have
##    to, https://en.wikipedia.org/wiki/Mokken_scale)
## 2. we won't observe an individual's location on the x-axis — the hard part!
|
\name{binom.probit}
\alias{binom.probit}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Binomial confidence intervals using the probit parameterization}
\description{
Uses the probit parameterization on the observed
proportion to construct confidence intervals.
}
\usage{
binom.probit(x, n, conf.level = 0.95, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{Vector of number of successes in the binomial experiment.}
\item{n}{Vector of number of independent trials in the binomial experiment.}
\item{conf.level}{The level of confidence to be used in the confidence
interval.}
\item{\dots}{ignored}
}
\details{
For derivations see \emph{doc/binom.pdf}.
}
\value{
A \code{data.frame} containing the observed
proportions and the lower and upper bounds of the confidence
interval.
}
\author{Sundar Dorai-Raj (sdorairaj@gmail.com) }
\seealso{\code{\link{binom.confint}}, \code{\link{binom.bayes}},
  \code{\link{binom.logit}}, \code{\link{binom.coverage}}}
\examples{
binom.probit(x = 0:10, n = 10)
}
\keyword{univar}% at least one, from doc/KEYWORDS
\keyword{htest}% __ONLY ONE__ keyword per line
\keyword{models}% __ONLY ONE__ keyword per line
| /man/binom.probit.Rd | no_license | cran/binom | R | false | false | 1,255 | rd | \name{binom.probit}
\alias{binom.probit}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Binomial confidence intervals using the probit parameterization}
\description{
Uses the probit parameterization on the observed
proportion to construct confidence intervals.
}
\usage{
binom.probit(x, n, conf.level = 0.95, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{Vector of number of successes in the binomial experiment.}
\item{n}{Vector of number of independent trials in the binomial experiment.}
\item{conf.level}{The level of confidence to be used in the confidence
interval.}
\item{\dots}{ignored}
}
\details{
For derivations see \emph{doc/binom.pdf}.
}
\value{
A \code{data.frame} containing the observed
proportions and the lower and upper bounds of the confidence
interval.
}
\author{Sundar Dorai-Raj (sdorairaj@gmail.com) }
\seealso{\code{\link{binom.confint}}, \code{\link{binom.bayes}},
\code{\link{binom.probit}}, \code{\link{binom.logit}}, \code{\link{binom.coverage}}}
\examples{
binom.probit(x = 0:10, n = 10)
}
\keyword{univar}% at least one, from doc/KEYWORDS
\keyword{htest}% __ONLY ONE__ keyword per line
\keyword{models}% __ONLY ONE__ keyword per line
|
install.packages("arules")
install.packages("proxy")
install.packages("registry")
install.packages("irlba")
install.packages("recommenderlab")
install.packages("reshape2")
install.packages("plyr")
install.packages("stringr")
install.packages("stringi")
install.packages("ggplot2")
install.packages("splitstackshape")
install.packages("data.table")
install.packages("gsubfn")
install.packages("sqldf")
install.packages("proto")
install.packages("RSQLite")
# Loading the libraries.
library(arules)
library(proxy)
library(registry)
library(irlba)
library(recommenderlab)
library(reshape2)
library(plyr)
library(stringr)
library(stringi)
library(ggplot2)
library(splitstackshape)
library(data.table)
library(proto)
library(RSQLite)
library(gsubfn)
library(sqldf)
# Data directory path for the file.
# NOTE(review): hard-coding an absolute setwd() makes the script
# machine-specific; consider relative paths instead.
setwd ("/home/ratulr/Documents/data_mining/DataMining_Recommender_Mehregan/recommender-master")
# Read training file along with header
# (columns used below: userId, movieId, rating; column 4 is the timestamp)
inputdata<-read.csv("ratings.csv",header=TRUE)
# Just look at first few lines of this file
head(inputdata)
# Remove 'timestamp' column. We do not need it
inputdata<-inputdata[,-c(4)]
head(inputdata)
# Setting the seed for the sampling.
set.seed(12)
#Taking stratified sampling from the data set based on user id . Taking 80% data to train the model.
# stratified() comes from the splitstackshape package loaded above.
train_data = stratified(inputdata, "userId", .8)
# test_data is the testing data set . Taking the remaining 20% of the data to test the model output.
# sqldf's EXCEPT keeps every row of inputdata that is not in train_data.
test_data = sqldf("select * from inputdata except select * from train_data")
head(train_data)
# Using acast to convert above data as follows:
# m1 m2 m3 m4
# u1 3 4 2 5
# u2 1 6 5
# u3 4 4 2 5
# (one row per user, one column per movie; unrated cells become NA)
acast_data <- acast(train_data, userId ~ movieId)
# Check the class of acast_data
class(acast_data)
# Convert it as a matrix
R <- as.matrix(acast_data)
# Convert R into realRatingMatrix data structure
# realRatingMatrix is a recommenderlab sparse-matrix like data-structure
r <- as(R, "realRatingMatrix")
r
# Create a recommender object (model)
# Run anyone of the following three code lines.
# Do not run all three
# They pertain to three different algorithms.
# UBCF: User-based collaborative filtering
# IBCF: Item-based collaborative filtering
# Parameter 'method' decides similarity measure
# Cosine or Jaccard or Pearson
# Train is using the training set.
# Cosine Method
#train_rec_ubcf <- Recommender(r[1:nrow(r)],method="UBCF", param=list(normalize = "Z-score",nn = 5,method="Cosine", minRating = 1))
# Jaccard Method
#train_rec_ubcf <- Recommender(r[1:nrow(r)],method="UBCF", param=list(normalize = "Z-score",method="Jaccard",nn=5, minRating=1))
# Pearson Method
# nn = 5 nearest neighbours; ratings are Z-score normalised per user.
train_rec_ubcf <- Recommender(r[1:nrow(r)],method="UBCF", param=list(normalize = "Z-score",method="Pearson",nn=5,minRating=1))
############Create predictions#############################
# This prediction does not predict movie ratings for test.
# But it fills up the user 'X' item matrix so that
# for any userid and movieid, I can find predicted rating
# 'type' parameter decides whether you want ratings or top-n items
# we will go for ratings
recom <- predict(train_rec_ubcf, r[1:nrow(r)], type="ratings")
recom
########## Create prediction file from model #######################
# We will create 3 files by running the model 3 times using 3 different similarity Jaccard,Cosine, Pearson
# Convert all your recommendations to list structure
# (one list element per user, holding that user's predicted ratings)
rec_list<-as(recom,"list")
head(summary(rec_list))
# Look up the predicted rating for every (user, movie) pair in the test set.
# Pre-allocated up front instead of grown one element at a time (growing a
# NULL vector inside the loop copies it on every iteration).
ratings <- numeric(nrow(test_data))
# For all lines in test file, one by one
for (u in seq_len(nrow(test_data))) {
  # Read userid and movieid from columns 1 and 2 of test data
  # (the original comment said columns 2 and 3, which did not match the code)
  userid <- test_data[u, 1]
  movieid <- test_data[u, 2]
  # Get as list & then convert to data frame all recommendations for user: userid
  u1 <- as.data.frame(rec_list[[userid]])
  # Row names of u1 are the movie ids; copy them into a column so the
  # predicted rating for this movie can be looked up by id.
  u1$id <- row.names(u1)
  # Predicted rating lives in column 1 of u1; empty when no prediction exists.
  x <- u1[u1$id == movieid, 1]
  # If no rating was found, assign 0.
  ratings[u] <- if (length(x) == 0) 0 else x
}
length(ratings)
tx <- cbind(test_data[, 1:3], round(ratings))
# Write to a csv file: output_<method>.csv in your folder
write.table(tx, file = "output_pearson.csv", row.names = FALSE, col.names = FALSE, sep = ",")
# Submit now this csv file to kaggle
######################################################################
#### To check the performance of the 3 models we will calculate NMAE:
######################################################################
#define a MAE function :
# Mean absolute error of a vector of residuals: mean(|error|).
mae <- function(error) mean(abs(error))
# The three per-method sections below were identical up to the file name, so
# the computation is factored into a single helper.
#
# Reads one of the prediction files written above (column 3 = actual rating,
# column 4 = predicted rating) and returns the normalised MAE:
#   MAE  = sum(|predicted - actual|) / n
#   NMAE = MAE / (max rating - min rating)
# The min and max ratings come from the actual ratings, which are identical
# across all three models because the test set is shared.
compute_nmae <- function(path) {
  preds <- read.csv(path, header = FALSE)
  actual <- preds[, 3]
  predicted <- preds[, 4]
  mae(predicted - actual) / (max(actual) - min(actual))
}
# For cosine
NMAE_cosine <- compute_nmae("output_cosine.csv")
NMAE_cosine
# NMAE_cosine = 0.1850611
# For jaccard
NMAE_jaccard <- compute_nmae("output_jaccard.csv")
NMAE_jaccard
# NMAE_jaccard = 0.1846834
# For pearson
NMAE_pearson <- compute_nmae("output_pearson.csv")
NMAE_pearson
# NMAE_pearson = 0.1852389
| /Ratul_RS_code.R | no_license | theyogiwhocodes/recommenderSystems | R | false | false | 6,509 | r | install.packages("arules")
install.packages("proxy")
install.packages("registry")
install.packages("irlba")
install.packages("recommenderlab")
install.packages("reshape2")
install.packages("plyr")
install.packages("stringr")
install.packages("stringi")
install.packages("ggplot2")
install.packages("splitstackshape")
install.packages("data.table")
install.packages("gsubfn")
install.packages("sqldf")
install.packages("proto")
install.packages("RSQLite")
# Loading the libraries.
library(arules)
library(proxy)
library(registry)
library(irlba)
library(recommenderlab)
library(reshape2)
library(plyr)
library(stringr)
library(stringi)
library(ggplot2)
library(splitstackshape)
library(data.table)
library(proto)
library(RSQLite)
library(gsubfn)
library(sqldf)
# Data directory path for the file.
setwd ("/home/ratulr/Documents/data_mining/DataMining_Recommender_Mehregan/recommender-master")
# Read training file along with header
inputdata<-read.csv("ratings.csv",header=TRUE)
# Just look at first few lines of this file
head(inputdata)
# Remove 'timestamp' column. We do not need it
inputdata<-inputdata[,-c(4)]
head(inputdata)
# Setting the seed for the sampling.
set.seed(12)
#Taking stratified sampling from the data set based on user id . Taking 80% data to train the model.
train_data = stratified(inputdata, "userId", .8)
# test_data is the testing data set . Taking the remaining 20% of the data to test the model output.
test_data = sqldf("select * from inputdata except select * from train_data")
head(train_data)
# Using acast to convert above data as follows:
# m1 m2 m3 m4
# u1 3 4 2 5
# u2 1 6 5
# u3 4 4 2 5
acast_data <- acast(train_data, userId ~ movieId)
# Check the class of acast_data
class(acast_data)
# Convert it as a matrix
R <- as.matrix(acast_data)
# Convert R into realRatingMatrix data structure
# realRatingMatrix is a recommenderlab sparse-matrix like data-structure
r <- as(R, "realRatingMatrix")
r
# Create a recommender object (model)
# Run anyone of the following three code lines.
# Do not run all three
# They pertain to three different algorithms.
# UBCF: User-based collaborative filtering
# IBCF: Item-based collaborative filtering
# Parameter 'method' decides similarity measure
# Cosine or Jaccard or Pearson
# Train is using the training set.
# Cosine Method
#train_rec_ubcf <- Recommender(r[1:nrow(r)],method="UBCF", param=list(normalize = "Z-score",nn = 5,method="Cosine", minRating = 1))
# Jaccard Method
#train_rec_ubcf <- Recommender(r[1:nrow(r)],method="UBCF", param=list(normalize = "Z-score",method="Jaccard",nn=5, minRating=1))
# Pearson Method
train_rec_ubcf <- Recommender(r[1:nrow(r)],method="UBCF", param=list(normalize = "Z-score",method="Pearson",nn=5,minRating=1))
############Create predictions#############################
# This prediction does not predict movie ratings for test.
# But it fills up the user 'X' item matrix so that
# for any userid and movieid, I can find predicted rating
# 'type' parameter decides whether you want ratings or top-n items
# we will go for ratings
recom <- predict(train_rec_ubcf, r[1:nrow(r)], type="ratings")
recom
########## Create prediction file from model #######################
# We will create 3 files by running the model 3 times using 3 different similarity Jaccard,Cosine, Pearson
# Convert all your recommendations to list structure
rec_list<-as(recom,"list")
head(summary(rec_list))
ratings <- NULL
# For all lines in test file, one by one
for ( u in 1:length(test_data[,1]))
{
# Read userid and movieid from columns 2 and 3 of test data
userid <- test_data[u,1]
movieid <- test_data[u,2]
# Get as list & then convert to data frame all recommendations for user: userid
u1 <- as.data.frame(rec_list[[userid]])
# Create a (second column) column-id in the data-frame u1 and populate it with row-names
# Remember (or check) that rownames of u1 contain are by movie-ids
# We use row.names() function
u1$id <- row.names(u1)
# Now access movie ratings in column 1 of u1
x <- u1[u1$id==movieid,1]
# If no ratings were found, assign 0.
if (length(x)==0)
{
ratings[u] <- 0
}
else
{
ratings[u] <-x
}
}
length(ratings)
tx<-cbind(test_data[,1:3],round(ratings))
# Write to a csv file: output_<method>.csv in your folder
write.table(tx,file="output_pearson.csv",row.names=FALSE,col.names=FALSE,sep=',')
# Submit now this csv file to kaggle
######################################################################
#### To check the performance of the 3 models we will calculate NMAE:
######################################################################
#define a MAE function :
mae <- function(error)
{
mean(abs(error))
}
#For cosine
cosinedata <- read.csv("output_cosine.csv", header = FALSE)
cosine_actual <- cosinedata[,3]
cosine_predicted <- cosinedata[,4]
error_cosine <- cosine_predicted - cosine_actual
# MAE = Sum(|predicted - real |)/ n
mae_cosine <- mae(error_cosine)
# The Min and Max ratings are common for all three models as the testing set is same across all 3 models
max_rating = max(cosine_actual)
min_rating = min(cosine_actual)
#NMAE = MAE/ (max rating - min rating)
NMAE_cosine <- mae_cosine / (max_rating - min_rating )
NMAE_cosine
#NMAE_cosine = 0.1850611
#For jaccard
jaccarddata <- read.csv("output_jaccard.csv", header = FALSE)
jaccard_actual <- jaccarddata[,3]
jaccard_predicted <- jaccarddata[,4]
error_jaccard <- jaccard_predicted - jaccard_actual
# MAE = Sum(|predicted - real |)/ n
mae_jaccard <- mae(error_jaccard)
# The Min and Max ratings are common for all three models as the testing set is same across all 3 models
max_rating = max(jaccard_actual)
min_rating = min(jaccard_actual)
#NMAE = MAE/ (max rating - min rating)
NMAE_jaccard <- mae_jaccard / (max_rating - min_rating )
NMAE_jaccard
#NMAE_jaccard 0.1846834
#For pearson
pearsondata <- read.csv("output_pearson.csv", header = FALSE)
pearson_actual <- pearsondata[,3]
pearson_predicted <- pearsondata[,4]
error_pearson <- pearson_predicted - pearson_actual
# MAE = Sum(|predicted - real |)/ n
mae_pearson <- mae(error_pearson )
# The Min and Max ratings are common for all three models as the testing set is same across all 3 models
max_rating = max(pearson_actual)
min_rating = min(pearson_actual)
#NMAE = MAE/ (max rating - min rating)
NMAE_pearson <- mae_pearson / (max_rating - min_rating )
NMAE_pearson
#NMAE_pearson 0.1852389
|
# Assuming both rds files are present in the current directory
library(ggplot2)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Keep only Baltimore City (fips 24510) and Los Angeles County (fips 06037).
reqdRows <- NEI[NEI$fips == "24510" | NEI$fips == "06037", ]
# Source classification codes whose short name mentions "motor" (vehicles).
motorNames <- grep("motor", SCC$Short.Name, ignore.case = TRUE)
motorSCCRows <- SCC[motorNames, ]
motorNEIRows <- reqdRows[reqdRows$SCC %in% motorSCCRows$SCC, ]
png(filename = "plot6.png",
    width = 480, height = 480,
    units = "px", bg = "transparent")
# Build the complete plot once, then print it to the open png device.
# (The original composed the line/labels/legend without assigning the result
# and then printed only g + geom_point(), which dropped the summary line,
# axis label, title and legend from the saved file. The par() call before
# png() was removed: it targeted the previous device and par margins do not
# affect ggplot output anyway.)
g <- ggplot(motorNEIRows, aes(year, Emissions, color = fips)) +
  geom_line(stat = "summary", fun.y = "sum") +
  geom_point(stat = "summary", fun.y = "sum") +
  ylab(expression('Total PM'[2.5]*" Emissions")) +
  ggtitle("Comparison of Total Emissions From Motor\n Vehicle Sources in Baltimore City\n and Los Angeles County from 1999 to 2008") +
  scale_colour_discrete(name = "Group", labels = c("Los Angeles","Baltimore"))
print(g)
dev.off() | /Assignment 2/plot6.R | no_license | amolsharma99/ExData_Plotting1 | R | false | false | 923 | r | #Assuming both rds file are present in the current directory
library(ggplot2)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
reqdRows <- NEI[NEI$fips == "24510"|NEI$fips == "06037", ]
motorNames <- grep("motor", SCC$Short.Name, ignore.case = T)
motorSCCRows <- SCC[motorNames, ]
motorNEIRows <- reqdRows[reqdRows$SCC %in% motorSCCRows$SCC, ]
par("mar"=c(5.1, 4.5, 4.1, 2.1))
png(filename = "plot6.png",
width = 480, height = 480,
units = "px", bg = "transparent")
g <- ggplot(motorNEIRows, aes(year, Emissions, color = fips))
g + geom_line(stat = "summary", fun.y = "sum") +
ylab(expression('Total PM'[2.5]*" Emissions")) +
ggtitle("Comparison of Total Emissions From Motor\n Vehicle Sources in Baltimore City\n and Los Angeles County from 1999 to 2008") +
scale_colour_discrete(name = "Group", label = c("Los Angeles","Baltimore"))
print(g+geom_point())
dev.off() |
#Bama game
file <- read.csv("~/nd_basketball/raw_plays/advo12.csv")
bama_starters = | /clean.R | no_license | eswan18/nd_basketball | R | false | false | 84 | r | #Bama game
file <- read.csv("~/nd_basketball/raw_plays/advo12.csv")
bama_starters = |
# Sourcing the estimators ---------------------------------------------------
source("Microstructure_noise_estimators/TSRV.R")
source("Microstructure_noise_estimators/Pre_average.R")
source("Microstructure_noise_estimators/Realized_Kernels.R")
source("Microstructure_noise_estimators/Heston_Sim.R")
source("Jumps/BPV_adj.R")
source("Jumps/MBPV.R")
# Piecewise-constant lookup of a simulated variance path at times t:
# each t maps to grid index floor(t / delta), with index 0 bumped to 1 so
# that t = 0 reads the first grid point. Vectorised over t, so it can be
# passed directly to integrate().
IV_fun <- function(vol_path, t, delta) {
  idx <- floor(t / delta)
  idx[idx == 0] <- 1
  vol_path[idx]
}
# Parameters --------------------------------------------------------------
# Loads Compound_Pois(), the compound-Poisson jump simulator used below.
source("Jumps/Heston_Poisson.R")
# Model parameters; names match the arguments of Heston_Sim below.
# (Presumably r = drift rate, alpha/lambda = variance mean-reversion
# parameters, sigma_v = vol-of-vol, rho = price/vol correlation, S_0/V_0 =
# initial price and variance — confirm against Heston_Sim's definition.)
r = 0.05
alpha = 0.04*5
lambda = 5
sigma_v = 0.5
rho = -0.5
S_0 = 1
V_0 = 0.3
# n observations over one trading day (T_val = 1/252 years); delta is the
# time step. 23400 is presumably one observation per second over a 6.5 hour
# session — confirm.
n = 23400
T_val = 1/252
delta = T_val/n
# Jump intensity: on average 50 jumps per interval of length T_val.
intensity = 50/T_val
m = 0
#Make grid
# All combinations of microstructure-noise std devs and jump-size std devs
# to sweep over in the Monte Carlo below.
sigma_eps_grid = c(0, 0.00025, 0.0005, 0.001)
jumps_std_grid = c(0, 0.0016, 0.0025, 0.0032)
Paramater_sets = expand.grid(sigma_eps = sigma_eps_grid, jump_std = jumps_std_grid)
# Monte Carlo --------------------------------------------------------------
# Number of Monte Carlo replications per parameter set.
M = 10000
cores <- 10
# NOTE(review): makeCluster()/clusterExport() come from the parallel
# package — presumably attached by one of the sourced files; confirm.
cluster <- makeCluster(cores)
# Workers need dplyr (for %>%) plus a copy of everything defined so far.
clusterEvalQ(cluster, c(library(dplyr)))
clusterExport(cl = cluster, ls(), environment())
# Outer loop: one result column per (sigma_eps, jump_std) combination.
sim_result = lapply( X = 1:nrow(Paramater_sets), function(params_idx){
# Inner loop (parallel): M independent simulated days for this setting.
single_sim <- pbapply::pblapply(cl = cluster, X = 1:M, FUN = function(params){
sigma_eps <- Paramater_sets[params_idx,]$sigma_eps
jump_std <- Paramater_sets[params_idx,]$jump_std
# Simulate an efficient price path and its variance path from the Heston model.
hest_sim <- Heston_Sim(T_val = T_val, n = n , r = r, rho = rho , alpha = alpha,
lambda = lambda, sigma_v = sigma_v, S_0 = S_0 , V_0 = V_0)
# Contaminate the path with i.i.d. Gaussian noise and compound-Poisson jumps.
micro_noise <- sigma_eps * rnorm(n + 1)
Jumps_sim <- Compound_Pois(T_val = T_val , n = n + 1, intensity = intensity , m = m, sigma_J = jump_std)
Path <- hest_sim$Heston + micro_noise + Jumps_sim
# Two-scale realized volatility estimate with its confidence bounds.
TSRV <- TSRV_estimator_optimal(X = Path, 5, 1, 3, T_val)
# Plain realized variance and a normal-approximation confidence interval.
RV <- sum(diff(Path)^2)
RV_conf_std <- sqrt(2/3 * sum(diff(Path)^4))
RV_conf_upper <- RV + qnorm(0.975) * RV_conf_std
RV_conf_lower <- RV + qnorm(0.025) * RV_conf_std
# True integrated variance: integrate the simulated variance path over the day.
IV <- integrate(IV_fun, lower = 0, upper = T_val, vol_path = hest_sim$Vol, delta = delta, subdivisions = 10000000)$value
# Relative absolute errors plus whether each interval covered the true IV.
data.frame(RV = abs(RV - IV)/IV, RV_hit = (IV >= RV_conf_lower & IV <= RV_conf_upper),
TSRV = abs(TSRV$TSRV - IV)/IV, TSRV_hit = (IV >= TSRV$lower & IV <= TSRV$upper)
)
}) %>% do.call(what = rbind) %>% colMeans()
# Average over the M replications; label the column with the parameter values.
return_single_sim <- data.frame(single_sim)
colnames(return_single_sim) <- paste("sigma_eps = ", Paramater_sets[params_idx,]$sigma_eps, "|jump_std = ", Paramater_sets[params_idx,]$jump_std, sep = "")
return_single_sim
}) %>% do.call(what = cbind)
stopCluster(cluster)
saveRDS(sim_result, "tsrv_corrected_coverage_sigma_J_sigma_eps.rds") | /Chapter4/simulation_runs/EstimatorComparisson_Sigma_eps_sigma_J.R | no_license | MarksSmirnovs/Quadratic-Variation-Disentanglement-A-High-Frequency-Data-Approach | R | false | false | 2,810 | r |
# Sourcing the estimators ---------------------------------------------------
source("Microstructure_noise_estimators/TSRV.R")
source("Microstructure_noise_estimators/Pre_average.R")
source("Microstructure_noise_estimators/Realized_Kernels.R")
source("Microstructure_noise_estimators/Heston_Sim.R")
source("Jumps/BPV_adj.R")
source("Jumps/MBPV.R")
IV_fun <- function(vol_path, t, delta){
eval_vals <- floor(t / delta)
eval_vals <- ifelse(eval_vals == 0, 1, eval_vals)
vol_path[eval_vals]
}
# Parameters --------------------------------------------------------------
source("Jumps/Heston_Poisson.R")
r = 0.05
alpha = 0.04*5
lambda = 5
sigma_v = 0.5
rho = -0.5
S_0 = 1
V_0 = 0.3
n = 23400
T_val = 1/252
delta = T_val/n
intensity = 50/T_val
m = 0
#Make grid
sigma_eps_grid = c(0, 0.00025, 0.0005, 0.001)
jumps_std_grid = c(0, 0.0016, 0.0025, 0.0032)
Paramater_sets = expand.grid(sigma_eps = sigma_eps_grid, jump_std = jumps_std_grid)
# Monte Carlo --------------------------------------------------------------
M = 10000
cores <- 10
cluster <- makeCluster(cores)
clusterEvalQ(cluster, c(library(dplyr)))
clusterExport(cl = cluster, ls(), environment())
sim_result = lapply( X = 1:nrow(Paramater_sets), function(params_idx){
single_sim <- pbapply::pblapply(cl = cluster, X = 1:M, FUN = function(params){
sigma_eps <- Paramater_sets[params_idx,]$sigma_eps
jump_std <- Paramater_sets[params_idx,]$jump_std
hest_sim <- Heston_Sim(T_val = T_val, n = n , r = r, rho = rho , alpha = alpha,
lambda = lambda, sigma_v = sigma_v, S_0 = S_0 , V_0 = V_0)
micro_noise <- sigma_eps * rnorm(n + 1)
Jumps_sim <- Compound_Pois(T_val = T_val , n = n + 1, intensity = intensity , m = m, sigma_J = jump_std)
Path <- hest_sim$Heston + micro_noise + Jumps_sim
TSRV <- TSRV_estimator_optimal(X = Path, 5, 1, 3, T_val)
RV <- sum(diff(Path)^2)
RV_conf_std <- sqrt(2/3 * sum(diff(Path)^4))
RV_conf_upper <- RV + qnorm(0.975) * RV_conf_std
RV_conf_lower <- RV + qnorm(0.025) * RV_conf_std
IV <- integrate(IV_fun, lower = 0, upper = T_val, vol_path = hest_sim$Vol, delta = delta, subdivisions = 10000000)$value
data.frame(RV = abs(RV - IV)/IV, RV_hit = (IV >= RV_conf_lower & IV <= RV_conf_upper),
TSRV = abs(TSRV$TSRV - IV)/IV, TSRV_hit = (IV >= TSRV$lower & IV <= TSRV$upper)
)
}) %>% do.call(what = rbind) %>% colMeans()
return_single_sim <- data.frame(single_sim)
colnames(return_single_sim) <- paste("sigma_eps = ", Paramater_sets[params_idx,]$sigma_eps, "|jump_std = ", Paramater_sets[params_idx,]$jump_std, sep = "")
return_single_sim
}) %>% do.call(what = cbind)
stopCluster(cluster)
saveRDS(sim_result, "tsrv_corrected_coverage_sigma_J_sigma_eps.rds") |
# Treelet analysis of food group data
library(tidyverse)
library(treelet) # For treelet transform
library(ggdendro)
library(here)
source(here("Code", "treelet_functions.R"))
cat("\n Treelet analysis on food groups\n")
# Generate maximum height tree
# Save basis vectors at all cut levels so the most useful can be selected later
# (whichsave spans every possible cut level, 1 .. p-1 for p food groups).
food_groups_tree_full <- Run_JTree(food_groups_cor, maxlev = ncol(food_groups_cor)-1, whichsave = 1:(ncol(food_groups_cor)-1))
# Extract the treelet components and associated variances
food_groups_tc_full <- TTVariances(food_groups_tree_full, food_groups_cor)
# Dendrogram of maximum height tree
food_groups_dendro <- dendro_data(ConvertTTDendro(food_groups_tree_full))
# Plotting order
# (leaf labels in dendrogram order; reused below to order factor levels)
food_groups_dendro_order <- as.character(food_groups_dendro$labels$label)
# Initial analysis - data driven selection of number of components and cut points
## Cross validation ##
# Data driven cross validation to choose a cut level that can describe the
# data with few components (TCs)
# Set the desired number of components (m) based on scree plot and then find the best cut level
# Cross validation scores for each cut level
# (5 replicate runs of the cross-validation, one column per run)
m_grps <- 8
cvs_grps <- replicate(5, CrossValidateTT(food_groups_scl, m = m_grps))
# Fit reduced treelet
# Project selected components to get scores for each individual using the selected cut level
# Cut level selected based on cross validation and inspection of dendrogram
# Original
# Fit the reduced treelet at the chosen cut level and project the scaled
# food-group data onto the retained components to get per-individual scores.
food_groups_tc_reduced <- TTVariances(food_groups_tree_full, cor(food_groups_scl), cut_level = 28, components = m_grps)
food_groups_tc_scores <- food_groups_scl %*% food_groups_tc_reduced$tc
cat("\n Calculate correlation of TC scores ")
# Kendall correlation between component scores, formatted to 2 dp; blank out
# the upper triangle (including the diagonal) for display.
food_groups_tc_cor <- cor(food_groups_tc_scores, method = "kendall") %>%
  formatC(digits = 2, format = "f")
# TRUE rather than T: T is an ordinary variable that can be reassigned.
food_groups_tc_cor[upper.tri(food_groups_tc_cor, diag = TRUE)] <- ""
### Add TC scores to main NHANES dataset
# The resulting dataset is distinct from the one used in the nutrient analysis ('nh')
nh_grps <- bind_cols(nhanes, as_tibble(food_groups_tc_scores)) %>%
# Classify TC scores into deciles
mutate_at(vars(matches("^TC[0-9]{1,2}$")), list(~as.factor(ntile(., n = 10)))) %>%
# ...and suffix those decile columns with "_dec"
rename_at(vars(matches("^TC[0-9]{1,2}$")), ~paste0(., "_dec")) %>%
# Raw TC scores
# (bound a second time so both the decile and continuous versions are kept)
bind_cols(as_tibble(food_groups_tc_scores)) %>%
inner_join(dietary, by = "SEQN") %>%
# Standardise age and KCAL to make predictions easier
# (KCAL_raw preserves the unscaled energy intake)
mutate(RIDAGEYR = as.numeric(scale(RIDAGEYR)),
KCAL_raw = KCAL,
KCAL = as.numeric(scale(KCAL))) %>%
# Set up outcome variable for Beta regression
mutate(prop_CAL_sites3mm_beta = PropTransform(prop_CAL_sites3mm))
#food_groups_out <- bind_cols(diet, data.frame(food_groups_tc_scores))
# Wrap labels to 30 characters per line; any label that still fits on a
# single line gets a leading newline so every label occupies two display
# lines, then the result is converted to a factor in order of appearance.
WrapLabel <- function(x) {
  wrapped <- str_wrap(x, width = 30)
  padded <- if_else(
    str_detect(wrapped, "\n"),
    as.character(wrapped),
    paste0("\n", as.character(wrapped))
  )
  as_factor(padded)
}
# Extract loadings for TCs
# Long format: one row per (food group, component) pair with its loading.
food_groups_loadings <- food_groups_tc_reduced$tc %>%
as_tibble(rownames = "Variable") %>%
gather(Component, Value, -Variable) %>%
# Add food group descriptions
inner_join(fgrp %>%
distinct(grp_code, grp_description),
by = c("Variable" = "grp_code")) %>%
# Order by leaf labelling order
# (food_groups_dendro_order was captured from the dendrogram above)
mutate(Variable = factor(Variable, levels = food_groups_dendro_order)) %>%
arrange(Variable) %>%
mutate(grp_description = as_factor(grp_description),
grp_padded = WrapLabel(grp_description))
| /Code/treelet_food_groups.R | no_license | david-m-wright/NHANES-diet-periodontal-disease | R | false | false | 3,633 | r | # Treelet analysis of food group data
library(tidyverse)
library(treelet) # For treelet transform
library(ggdendro)
library(here)
source(here("Code", "treelet_functions.R"))
cat("\n Treelet analysis on food groups\n")
# Generate maximum height tree
# Save basis vectors at all cut levels so the most useful can be selected later
food_groups_tree_full <- Run_JTree(food_groups_cor, maxlev = ncol(food_groups_cor)-1, whichsave = 1:(ncol(food_groups_cor)-1))
# Extract the treelet components and associated variances
# Treelet transform (TT) variance decomposition over the full-height tree.
# TTVariances / ConvertTTDendro / CrossValidateTT are project helpers defined
# elsewhere in this repository -- TODO confirm their exact contracts.
food_groups_tc_full <- TTVariances(food_groups_tree_full, food_groups_cor)
# Dendrogram of maximum height tree
food_groups_dendro <- dendro_data(ConvertTTDendro(food_groups_tree_full))
# Plotting order (leaf labels, left to right)
food_groups_dendro_order <- as.character(food_groups_dendro$labels$label)
# Initial analysis - data driven selection of number of components and cut points
## Cross validation ##
# Data driven cross validation to choose a cut level that can describe the
# data with few components (TCs)
# Set the desired number of components (m) based on scree plot and then find the best cut level
# Cross validation scores for each cut level (5 replicates)
m_grps <- 8
cvs_grps <- replicate(5, CrossValidateTT(food_groups_scl, m = m_grps))
# Fit reduced treelet
# Project selected components to get scores for each individual using the selected cut level
# Cut level selected based on cross validation and inspection of dendrogram
# Original
food_groups_tc_reduced <- TTVariances(food_groups_tree_full, cor(food_groups_scl), cut_level = 28, components = m_grps)
# Individual-level treelet component (TC) scores: data matrix %*% loadings
food_groups_tc_scores <- food_groups_scl %*% food_groups_tc_reduced$tc
cat("\n Calculate correlation of TC scores ")
# Kendall correlations between TC scores, formatted to 2 dp for display;
# the upper triangle (incl. diagonal, note `diag = T`) is blanked for a
# cleaner lower-triangular table
food_groups_tc_cor <- cor(food_groups_tc_scores, method = "kendall") %>%
  formatC(digits = 2, format = "f")
food_groups_tc_cor[upper.tri(food_groups_tc_cor, diag = T)] <- ""
### Add TC scores to main NHANES dataset
# The resulting dataset is distinct from the one used in the nutrient analysis ('nh')
nh_grps <- bind_cols(nhanes, as_tibble(food_groups_tc_scores)) %>%
  # Classify TC scores into deciles
  mutate_at(vars(matches("^TC[0-9]{1,2}$")), list(~as.factor(ntile(., n = 10)))) %>%
  # Mark the decile versions with a "_dec" suffix
  rename_at(vars(matches("^TC[0-9]{1,2}$")), ~paste0(., "_dec")) %>%
  # Raw TC scores (bound again, now that the decile columns are renamed)
  bind_cols(as_tibble(food_groups_tc_scores)) %>%
  inner_join(dietary, by = "SEQN") %>%
  # Standardise age and KCAL to make predictions easier
  mutate(RIDAGEYR = as.numeric(scale(RIDAGEYR)),
         KCAL_raw = KCAL,
         KCAL = as.numeric(scale(KCAL))) %>%
  # Set up outcome variable for Beta regression
  mutate(prop_CAL_sites3mm_beta = PropTransform(prop_CAL_sites3mm))
#food_groups_out <- bind_cols(diet, data.frame(food_groups_tc_scores))
WrapLabel <- function(x){
  # Wrap long labels onto multiple lines (~30 chars per line) and give
  # single-line labels a leading newline so every label occupies at least
  # two lines -- this keeps plot axis/legend labels vertically aligned.
  wrapped <- str_wrap(x, width = 30)
  needs_pad <- !str_detect(wrapped, "\n")
  padded <- if_else(needs_pad,
                    paste0("\n", as.character(wrapped)),
                    as.character(wrapped))
  as_factor(padded)
}
# Extract loadings for TCs (long format: one row per variable x component)
food_groups_loadings <- food_groups_tc_reduced$tc %>%
  as_tibble(rownames = "Variable") %>%
  # NOTE(review): gather() is superseded by pivot_longer(); kept as-is
  gather(Component, Value, -Variable) %>%
  # Add food group descriptions
  inner_join(fgrp %>%
               distinct(grp_code, grp_description),
             by = c("Variable" = "grp_code")) %>%
  # Order by leaf labelling order
  mutate(Variable = factor(Variable, levels = food_groups_dendro_order)) %>%
  arrange(Variable) %>%
  # grp_padded: wrapped/padded plot labels (see WrapLabel)
  mutate(grp_description = as_factor(grp_description),
         grp_padded = WrapLabel(grp_description))
|
## Following the course examples and explanation from DanieleP's GitHub.
## makeCacheMatrix() creates an object (a list of functions sharing one environment) that stores the input matrix and the cached solution (the inverse matrix) and allows these values to be retrieved.
## cacheSolve() retrieves the stored solution if it has previously been cached, or computes it if it has not been, then returns this value (the inverse matrix).
## These functions assume the matrix supplied is always invertible (as per the assignment instructions).
## set() changes the value of x in the parent function to the input value y, and resets s (the solution) to NULL.
## get() returns the value of x in the parent function (as stored by the set() function).
## setsolve() stores the computed value of solve() (i.e. the solution/inverse matrix) as s.
## getsolve() returns the value of s (the solution stored by the setsolve() function).
makeCacheMatrix <- function(x = matrix()) {
  ## Create a "cache matrix": a list of accessor closures that share this
  ## function's environment. 's' caches the inverse of 'x' and stays NULL
  ## until cacheSolve() stores a value.
  s <- NULL
  list(
    set = function(y) {
      ## Store a new matrix and drop the now-stale cached inverse
      x <<- y
      s <<- NULL
    },
    get = function() x,
    setsolve = function(solve) s <<- solve,
    getsolve = function() s
  )
}
## The cached value of 's' (stored using the 'setsolve' function) is retrieved using the 'getsolve' function.
## If the value of 's' is not NULL (i.e., it is a previously stored value), a message is printed and the stored value of 's' is returned.
## If 's' is NULL, then the value of x (the stored input from the 'makeCacheMatrix' function) is assigned to the object 'data' using the 'get' function.
## The inverse matrix is then computed using the 'solve' function and assigned to the object 's'.
## The value of 's' is cached using the 'setsolve' function.
## Then the value of 's' (the solution) is returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the matrix held in the cache
  ## object 'x' (built by makeCacheMatrix), computing it only on a cache
  ## miss. Extra arguments in '...' are forwarded to solve().
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setsolve(inv)  # store for future calls
  inv
}
| /cachematrix.R | no_license | minilizzie/ProgrammingAssignment2 | R | false | false | 2,114 | r | ## Following the course examples and explanation from DanieleP's Github
## These functions first create an object (a list of functions sharing one environment) that stores the input matrix and the cached solution (the inverse matrix) and allows these values to be retrieved.
## The second function retrieves the stored solution (if it has previously been cached) or computes the value (if it has not been previously cached), then returns this value (the inverse matrix).
## These functions assume the matrix supplied is always invertible (as per the assignment instructions).
## set() changes the value of x in the parent function to the input value y, and resets s (the solution) to NULL.
## get() returns the value of x in the parent function (as stored by the 'set' function).
## setsolve() stores the computed value of solve() (i.e. the solution/inverse matrix) as s.
## getsolve() returns the value of s (the solution stored by the 'setsolve' function).
makeCacheMatrix <- function(x = matrix()) {
  ## Cache-aware container for a matrix 'x' and its inverse.
  ## 's' is the cached inverse; NULL signals "not yet computed".
  s <- NULL
  get <- function() x                      # the stored matrix
  getsolve <- function() s                 # cached inverse, or NULL
  setsolve <- function(solve) s <<- solve  # cache a computed inverse
  set <- function(y) {
    ## Swap in a new matrix; the old cached inverse is now stale.
    x <<- y
    s <<- NULL
  }
  ## Expose the four accessors as a named list (same element order as before).
  list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## The cached value of 's' (stored using the 'setsolve' function) is retrieved using the 'getsolve' function.
## If the value of 's' is not NULL (i.e., it is a previously stored value), a message is printed and the stored value of 's' is returned.
## If 's' is NULL, then the value of x (the stored input from the 'makeCacheMatrix' function) is assigned to the object 'data' using the 'get' function.
## The inverse matrix is then computed using the 'solve' function and assigned to the object 's'.
## The value of 's' is cached using the 'setsolve' function.
## Then the value of 's' (the solution) is returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the matrix held inside the
  ## cache object 'x', reusing the cached inverse when one is available.
  ## Arguments in '...' are passed through to solve().
  s <- x$getsolve()
  if (is.null(s)) {
    ## Cache miss: fetch the matrix, invert it, and store the result.
    s <- solve(x$get(), ...)
    x$setsolve(s)
  } else {
    message("getting cached data")
  }
  s
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Social}
\alias{Social}
\title{Median income level for 25 social workers from North Carolina}
\format{A data frame with 25 observations on the following variable.
\describe{
\item{income}{a numeric vector}
}}
\description{
Data for Exercise 6.63
}
\examples{
str(Social)
attach(Social)
SIGN.test(income,md=27500,alternative="less")
detach(Social)
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Duxbury
}
\keyword{datasets}
| /man/Social.Rd | no_license | johnsonjc6/BSDA | R | false | true | 575 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Social}
\alias{Social}
\title{Median income level for 25 social workers from North Carolina}
\format{A data frame with 25 observations on the following variable.
\describe{
\item{income}{a numeric vector}
}}
\description{
Data for Exercise 6.63
}
\examples{
str(Social)
attach(Social)
SIGN.test(income,md=27500,alternative="less")
detach(Social)
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Duxbury
}
\keyword{datasets}
|
# UI definition: a full-window Leaflet map with a floating date-range filter
# for the earthquake ('terremotos') data -- comment translated from Spanish
shinyUI(bootstrapPage(
  # Make the page (and hence the map) fill the whole browser window
  tags$style(type = "text/css", "html, body {width:100%;height:100%}"),
  leafletOutput("map", width = "100%", height = "100%"),
  # Top-right floating panel: date-range picker ("Periodo") bounded by the
  # dates observed in the 'terremotos' dataset, Spanish locale, weeks
  # starting on Monday
  absolutePanel(top = 10, right = 10,
                dateRangeInput('rangeDate',label = "Periodo",
                               start = min(terremotos$Fecha), end = max(terremotos$Fecha),
                               separator = " - ", startview = 'year', language = 'es',weekstart = 1
                )
  )
))
| /03_data_managing/R/00_masterClasses/03_graphics_rmarkdown_shiny/shinyExamples/terremoto/ui.R | no_license | AntonioPL/masterDataScience | R | false | false | 416 | r | # Definición del UI
# UI definition: full-window Leaflet map plus a floating date-range filter
shinyUI(bootstrapPage(
  # Make the page (and hence the map) fill the whole browser window
  tags$style(type = "text/css", "html, body {width:100%;height:100%}"),
  leafletOutput("map", width = "100%", height = "100%"),
  # Floating panel with a date-range selector ("Periodo") bounded by the
  # dates present in the 'terremotos' (earthquakes) dataset
  absolutePanel(top = 10, right = 10,
                dateRangeInput('rangeDate',label = "Periodo",
                               start = min(terremotos$Fecha), end = max(terremotos$Fecha),
                               separator = " - ", startview = 'year', language = 'es',weekstart = 1
                )
  )
))
|
# KAGGLE COMPETITION - GETTING STARTED
# This script file is intended to help you get started on the Kaggle platform, and to show you how to make a submission to the competition.
summary(HeadlineWordsTest)
# Let's start by reading the data into R
# Make sure you have downloaded these files from the Kaggle website, and have navigated to the directory where you saved the files on your computer
# We are adding in the argument stringsAsFactors=FALSE, since we have some text fields
NewsTrain = read.csv("NYTimesBlogTrain.csv", stringsAsFactors=FALSE)
str(NewsTrain)
summary(NewsTrain)
NewsTest = read.csv("NYTimesBlogTest.csv", stringsAsFactors=FALSE)
table(NewsTrain$RR, NewsTrain$Popular)
hist(log(40+NewsTest$WordCount))
NewsTrain$logWC = log(40+NewsTrain$WordCount)
NewsTest$logWC = log(40+NewsTest$WordCount)
HeadlineWordsTest$logWC = NewsTest$logWC
HeadlineWordsTrain$logWC = NewsTrain$logWC
summary(HeadlineWordsTrain)
NewsTrain$AbstractWordCount <- HeadlineWordsTrain$AbstractWordCount
NewsTrain$HeadlineWordCount <- HeadlineWordsTrain$HeadlineWordCount
NewsTrain$AbstractWC <- HeadlineWordsTrain2$AbstractWC
NewsTrain$HeadlineWC <- HeadlineWordsTrain2$HeadlineWC
HeadlineWordsTrain2$HeadlineWC
summary(HeadlineWordsTest)
HeadlineWordsTest$HeadlineWC
HeadlineWordsTrain$HeadlineWC
NewsTest$AbstractWC <- HeadlineWordsTest$AbstractWC
NewsTest$HeadlineWC <- HeadlineWordsTest$HeadlineWC
# We will just create a simple logistic regression model, to predict Popular using WordCount:
SimpleMod = glm(Popular ~ music + abmusic + War + abwar + friday + fashion + abfashion + eu + abRR + comments + RR + Times + pres + abreader+ abreaders + abpuzzle + The + Facebook + qna + obama + nc + apple + recap + fourteen + excl + ten + abferg + Facebook + senator + NYAbs+ test + Ask + morning + we + senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + HeadlineWC + AbstractWC + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, data=NewsTrain, family=binomial)
summary(SimpleMod)
predictionLog <- predict(SimpleMod, type = "response")
table(predictionLog > 0.5, NewsTrain$Popular)
ROCRpredTrain = prediction(predictionLog, NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain, "auc")@y.values)
predictionLogTest <- predict(SimpleMod, newdata= NewsTest, type = "response")
table(predictionLogTest >0.5)
MySubmission2 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictionLogTest)
write.csv(MySubmission2, "Logultimateallvaryt.csv", row.names=FALSE)
MySubmission2 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictionLogTest)
write.csv(MySubmission2, "Logultimateallvarmusicwar.csv", row.names=FALSE)
SimpleCART = rpart(Popular ~senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + HeadlineWC + AbstractWC + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, data=NewsTrain, minbucket=20, method="class")
prp(SimpleCART)
predictionCART <- predict(SimpleCART, type = "class")
table(predictionCART, NewsTrain$Popular)
# And then make predictions on the test set:
library(ROCR)
ROCRpredTrain = prediction(predictionCART, NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain, "auc")@y.values)
predictionLogTest <- predict(SimpleMod, newdata= NewsTest, type = "response")
table(predictionLogTest> 0.5)
library(rpart)
library(rpart.plot)
TrainCART <- rpart(Popular ~ WordCount + NewsDesk + Weekday + SectionName, data=NewsTrain, method= "class")
prp(TrainCART)
predictionCART <- predict(TrainCART, type = "prob")
predictionCART
table(predictionCART[,2] > 0.5, NewsTrain$Popular)
predictionCART2 <- predict(TrainCART, type = "class")
table(predictionCART2 , NewsTrain$Popular)
summary(predictionCART2)
ROCRpredTrainCART = prediction(predictionCART[,2], NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrainCART, "auc")@y.values)
library(randomForest)
SimpleRF = randomForest(as.factor(Popular) ~ music + abmusic + War + abwar + friday+ fashion + abfashion + China + Chinese + yt + SectionName + eu + abRR + comments + RR + Times + pres + abreader+ abreaders + abpuzzle + The + Facebook + qna + obama + nc + apple + recap + fourteen + excl + ten + abferg + Facebook + senator + NYAbs+ test + Ask + morning + we + senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + HeadlineWC + AbstractWC + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, data=NewsTrain, method="class" , ntree=1000, nodesize=10)
varImpPlot(SimpleRF)
predictionSimpleRF <- predict(SimpleRF, type = "prob")
table(predictionSimpleRF[,2] > 0.5, NewsTrain$Popular)
ROCRpredTrainSimpleRF = prediction(predictionSimpleRF[,2], NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrainSimpleRF, "auc")@y.values)
predictionSimpleRFTest <- predict(SimpleRF, newdata= NewsTest, type = "prob")
table(predictionSimpleRFTest[,2]> 0.5)
MySubmission2 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictionSimpleRFTest[,2])
write.csv(MySubmission2, "SimpleRF2.csv", row.names=FALSE)
SimpleRF2 = randomForest(as.factor(Popular) ~ music + abmusic + War + abwar + friday+ fashion + abfashion + China + Chinese + yt + SectionName + eu + abRR + comments + RR + Times + pres + abreader+ abreaders + abpuzzle + The + Facebook + qna + obama + nc + apple + recap + fourteen + excl + ten + abferg + Facebook + senator + NYAbs+ test + Ask + morning + we + senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, data=NewsTrain, method="class" , ntree=1000, nodesize=10)
varImpPlot(SimpleRF2)
predictionSimpleRF2 <- predict(SimpleRF2, type = "prob")
table(predictionSimpleRF2[,2] > 0.5, NewsTrain$Popular)
ROCRpredTrainSimpleRF2 = prediction(predictionSimpleRF2[,2], NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrainSimpleRF2, "auc")@y.values)
predictionSimpleRF2Test <- predict(SimpleRF2, newdata= NewsTest, type = "prob")
table(predictionSimpleRF2Test[,2]> 0.5)
MySubmission2 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictionSimpleRF2Test[,2])
write.csv(MySubmission2, "SimpleRFnoabstractheadlinewc.csv", row.names=FALSE)
SimpleRF3 = randomForest(as.factor(Popular) ~ SectionName + eu + abRR + comments + RR + Times + pres + abreader+ abreaders + abpuzzle + The + Facebook + qna + obama + nc + apple + recap + fourteen + excl + ten + abferg + Facebook + senator + NYAbs+ test + Ask + morning + we + senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, data=NewsTrain, method="class" , ntree=1000, nodesize=10)
varImpPlot(SimpleRF3)
predictionSimpleRF3 <- predict(SimpleRF3, type = "prob")
table(predictionSimpleRF3[,2] > 0.5, NewsTrain$Popular)
ROCRpredTrainSimpleRF3 = prediction(predictionSimpleRF3[,2], NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrainSimpleRF3, "auc")@y.values)
# Score the test set with the reduced random forest (SimpleRF3)
predictionSimpleRF3Test <- predict(SimpleRF3, newdata = NewsTest, type = "prob")
MySubmission3 <- data.frame(UniqueID = NewsTest$UniqueID, Probability1 = predictionSimpleRF3Test[, 2])
# BUG FIX: this previously wrote MySubmission2 (an older model's predictions)
# to the SimpleRF3 submission file; write the data frame built just above.
write.csv(MySubmission3, "SimpleRFnoabstractheadlinewcshort.csv", row.names = FALSE)
summary(NewsTrain2)
# Recode the 0/1 outcome as "Yes"/"No" labels (as required by caret's
# classification metrics). The vectorised ifelse() replaces the old
# hard-coded `for (i in 1:6532)` loop, which silently broke whenever the
# training set had a different number of rows, and drops the duplicated
# `NewsTrain2 <- NewsTrain` assignment.
NewsTrain2 <- NewsTrain
NewsTrain2$Popular <- ifelse(NewsTrain2$Popular == 1, "Yes", "No")
trgbm = train(as.factor(Popular) ~ music + abmusic + War + abwar + friday+ fashion + abfashion+ China + Chinese + yt + SectionName + eu + abRR + comments + RR + Times + pres + abreader+ abreaders + abpuzzle + The + Facebook + qna + obama + nc + apple + recap + fourteen + excl + ten + abferg + Facebook + senator + NYAbs+ test + Ask + morning + we + senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + HeadlineWC + AbstractWC + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, NewsTrain2, method="gbm", distribution= "bernoulli", metric="ROC", trControl=fitControl, verbose= FALSE)
predictiongbm <- predict(trgbm, NewsTrain2, type = "prob")
table(predictiongbm[,2] > 0.5, NewsTrain$Popular)
ROCRpredTraingbm = prediction(predictiongbm[,2], NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTraingbm, "auc")@y.values)
trgbm2 = train(as.factor(Popular) ~ music + abmusic + War + abwar + friday+ fashion + abfashion+ China + Chinese + yt + SectionName + eu + abRR + comments + RR + Times + pres + abreader+ abreaders + abpuzzle + The + Facebook + qna + obama + nc + apple + recap + fourteen + excl + ten + abferg + Facebook + senator + NYAbs+ test + Ask + morning + we + senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, NewsTrain2, method="gbm", distribution= "bernoulli", metric="ROC", trControl=fitControl, verbose= FALSE)
predictiongbm2 <- predict(trgbm2, NewsTrain2, type = "prob")
predictiongbm <- predict(trgbm, NewsTrain2, type = "prob")
predictiongbmTest <- predict(trgbm, NewsTest, type = "prob")
predictiongbmTest2 <- predict(trgbm2, NewsTest, type = "prob")
predictiongbmTest
table(predictiongbmTest[,2]> 0.5)
MySubmission2 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictiongbmTest[,2])
write.csv(MySubmission2, "Simplegbm2.csv", row.names=FALSE)
MySubmission3 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictiongbmTest2[,2])
write.csv(MySubmission3, "Simplegbmnowc.csv", row.names=FALSE)
NewsTrain$SectionName = as.factor(NewsTrain$SectionName)
NewsTrain$NewsDesk = as.factor(NewsTrain$NewsDesk)
NewsTrain$SubsectionName = as.factor(NewsTrain$SubsectionName)
NewsTest$SectionName = factor(NewsTest$SectionName, levels= levels(NewsTrain$SectionName))
NewsTest$NewsDesk = factor(NewsTest$NewsDesk, levels= levels(NewsTrain$NewsDesk))
NewsTest$SubsectionName = factor(NewsTest$SubsectionName, levels= levels(NewsTrain$SubsectionName))
summary(NewsTrain)
summary(NewsTest)
table(NewsTrain$SectionName)
TrainForest <- randomForest(as.factor(Popular) ~ WordCount + NewsDesk + Weekday + SectionName, data=NewsTrain, method= "class")
predictionForest <- predict(TrainForest, type = "prob")
predictionForest
table(predictionForest[,2] > 0.5, NewsTrain$Popular)
ROCRpredTrainForest = prediction(predictionForest[,2], NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrainForest, "auc")@y.values)
predictionForest2 <- predict(TrainForest, newdata= NewsTest, type = "prob")
predictionForest2[,2]
MySubmission2 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictionForest2[,2])
write.csv(MySubmission2, "RandomForestfirst.csv", row.names=FALSE)
# We can't compute the accuracy or AUC on the test set ourselves, since we don't have the dependent variable on the test set (you can compute it on the training set though!).
# However, you can submit the file on Kaggle to see how well the model performs. You can make up to 5 submissions per day, so don't hesitate to just upload a solution to see how you did.
# Let's prepare a submission file for Kaggle (for more about this, see the "Evaluation" page on the competition site):
# Predicted popularity probabilities for the test set from the logistic model
predTest <- predict(SimpleMod, newdata = NewsTest, type = "response")
# BUG FIX: 'PredTest' (capital P) was never defined -- R is case-sensitive,
# so the original line failed with "object 'PredTest' not found".
MySubmission <- data.frame(UniqueID = NewsTest$UniqueID, Probability1 = predTest)
write.csv(MySubmission, "SubmissionSimpleLog.csv", row.names = FALSE)
# You should upload the submission "SubmissionSimpleLog.csv" on the Kaggle website to use this as a submission to the competition
# This model was just designed to help you get started - to do well in the competition, you will need to build better models!
# One more helpful hint:
# This dataset has a date/time field (PubDate). You might remember dealing with date and time data in some of the Unit 1 homework problems.
# In this dataset, the following commands might be useful to you when trying to get date and time variables.
# To convert the date/time to something R will understand, you can use the following commands:
NewsTrain$PubDate = strptime(NewsTrain$PubDate, "%Y-%m-%d %H:%M:%S")
NewsTest$PubDate = strptime(NewsTest$PubDate, "%Y-%m-%d %H:%M:%S")
summary(NewsTrain)
summary(NewsTrain$PubDate)
# The second argument tells the strptime function how the data is formatted.
# If you opened the file in Excel or another spreadsheet software before loading it into R, you might have to adjust the format.
# See the help page ?strptime for more information.
# Now that R understands this field, there are many different attributes of the date and time that you can extract.
# For example, you can add a variable to your datasets called "Weekday" that contains the day of the week that the article was published (0 = Sunday, 1 = Monday, etc.), by using the following commands:
NewsTrain$Weekday = NewsTrain$PubDate$wday
NewsTest$Weekday = NewsTest$PubDate$wday
NewsTrain$Hour = NewsTrain$PubDate$hour
NewsTest$Hour = NewsTest$PubDate$hour
NewsTrain$Minute = NewsTrain$PubDate$min
NewsTest$Minute = NewsTest$PubDate$min
NewsTrain$Second = NewsTrain$PubDate$sec
NewsTest$Second = NewsTest$PubDate$sec
summary(NewsTest)
table(NewsTrain$Second)
table(NewsTrain$Weekday)
# Weekday could now be used as an independent variable in your predictive models.
# For more fields that you can extract from a date/time object in R, see the help page ?POSIXlt
library(tm)
library(SnowballC)
# Then create a corpus from the headline variable. You can use other variables in the dataset for text analytics, but we will just show you how to use this particular variable.
# Note that we are creating a corpus out of the training and testing data.
CorpusHeadline = Corpus(VectorSource(c(NewsTrain$Headline, NewsTest$Headline)))
# You can go through all of the standard pre-processing steps like we did in Unit 5:
CorpusHeadline = tm_map(CorpusHeadline, tolower)
# Remember this extra line is needed after running the tolower step:
CorpusHeadline = tm_map(CorpusHeadline, PlainTextDocument)
CorpusHeadline = tm_map(CorpusHeadline, removePunctuation)
CorpusHeadline = tm_map(CorpusHeadline, removeWords, stopwords("english"))
CorpusHeadline = tm_map(CorpusHeadline, stemDocument)
# Now we are ready to convert our corpus to a DocumentTermMatrix, remove sparse terms, and turn it into a data frame.
# We selected one particular threshold to remove sparse terms, but remember that you can try different numbers!
dtm = DocumentTermMatrix(CorpusHeadline)
sparse = removeSparseTerms(dtm, 0.995)
HeadlineWords = as.data.frame(as.matrix(sparse))
# Let's make sure our variable names are okay for R:
colnames(HeadlineWords) = make.names(colnames(HeadlineWords))
# Now we need to split the observations back into the training set and testing set.
# To do this, we can use the head and tail functions in R.
# The head function takes the first "n" rows of HeadlineWords (the first argument to the head function), where "n" is specified by the second argument to the head function.
# So here we are taking the first nrow(NewsTrain) observations from HeadlineWords, and putting them in a new data frame called "HeadlineWordsTrain"
HeadlineWordsTrain = head(HeadlineWords, nrow(NewsTrain))
# The tail function takes the last "n" rows of HeadlineWords (the first argument to the tail function), where "n" is specified by the second argument to the tail function.
# So here we are taking the last nrow(NewsTest) observations from HeadlineWords, and putting them in a new data frame called "HeadlineWordsTest"
HeadlineWordsTest = tail(HeadlineWords, nrow(NewsTest))
# Note that this split of HeadlineWords works to properly put the observations back into the training and testing sets, because of how we combined them together when we first made our corpus.
# Before building models, we want to add back the original variables from our datasets. We'll add back the dependent variable to the training set, and the WordCount variable to both datasets. You might want to add back more variables to use in your model - we'll leave this up to you!
HeadlineWordsTrain$Popular = NewsTrain$Popular
HeadlineWordsTrain$WordCount = NewsTrain$WordCount
HeadlineWordsTest$WordCount = NewsTest$WordCount
HeadlineWordsTrain$Weekday = NewsTrain$Weekday
HeadlineWordsTest$Weekday = NewsTest$Weekday
HeadlineWordsTrain$Hour= NewsTrain$Hour
HeadlineWordsTest$Hour = NewsTest$Hour
HeadlineWordsTrain$NewsDesk = NewsTrain$NewsDesk
HeadlineWordsTest$NewsDesk = NewsTest$NewsDesk
# Copy the NYT section metadata onto the bag-of-words data frames
HeadlineWordsTrain$SectionName <- NewsTrain$SectionName
HeadlineWordsTest$SectionName <- NewsTest$SectionName
# BUG FIX: the SubsectionName columns were being filled with SectionName
# (copy-paste slip), so any model term using SubsectionName silently reused
# the SectionName values. Copy the correct source column instead.
HeadlineWordsTrain$SubsectionName <- NewsTrain$SubsectionName
HeadlineWordsTest$SubsectionName <- NewsTest$SubsectionName
HeadlineWords$Popular = NewsTrain$Popular
sort(colSums(subset(HeadlineWordsTrain, Popular == 1)))
sort(colSums(HeadlineWordsTest))
HeadlineWordsTrain$HeadlineWC = rowSums(HeadlineWordsTrain)
HeadlineWordsTest$HeadlineWC = rowSums(HeadlineWordsTest)
sort(colSums(HeadlineWordsTrain))
summary(HeadlineWordsTest)
str(HeadlineWordsTrain)
summary(HeadlineWordsTrain)
HeadlineWordsTrain2 = HeadlineWordsTrain
HeadlineWordsTrain2$Popular = NewsTrain$Popular
summary(HeadlineWordsTrain2)
table(HeadlineWordsTrain$Popular)
table(NewsTrain$Popular)
HeadlineWordsLog <- glm(Popular ~ AbstractWordCount + HeadlineWordCount + appl + ebola + obama + day+ fashion + morn + new + read + today + word + logWC + NewsDesk + Weekday + SectionName + SubsectionName + Hour , data= HeadlineWordsTrain2, family= binomial)
summary(HeadlineWordsLog )
HeadlineWordsLog2 <- glm(Popular ~ . , data= HeadlineWordsTrain, family= binomial)
summary(HeadlineWordsLog2)
# In-sample probabilities from the headline-words logistic model
predHeadlineWordsLog <- predict(HeadlineWordsLog, type = "response")
# Confusion table at the 0.5 threshold
table(predHeadlineWordsLog > 0.5, HeadlineWordsTrain$Popular)
# BUG FIX: the prediction() call referenced 'predHeadlineWordsLog2', which
# was never computed (only the model HeadlineWordsLog2 exists), so this
# line errored. The AUC below is for the predictions made just above.
ROCRpredTrain <- prediction(predHeadlineWordsLog, HeadlineWordsTrain$Popular)
auc <- as.numeric(performance(ROCRpredTrain, "auc")@y.values)
HeadlineWordsForest <- randomForest(as.factor(Popular) ~ . , data= HeadlineWordsTrain, method= "class")
predHeadlineWordsForest <- predict(HeadlineWordsForest, type="prob")
table(predHeadlineWordsForest[,2] > 0.5, HeadlineWordsTrain$Popular)
ROCRpredTrain = prediction(predHeadlineWordsForest[,2], HeadlineWordsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain, "auc")@y.values)
HeadlineWordsForest2 <- randomForest(as.factor(Popular) ~ music + recap + war + day+ fashion + morn + new + read + today + word + WordCount + NewsDesk + Weekday + SectionName + SubsectionName, data= HeadlineWordsTrain, method= "class")
predHeadlineWordsForest2 <- predict(HeadlineWordsForest2, type="prob")
table(predHeadlineWordsForest2[,2] > 0.5, HeadlineWordsTrain$Popular)
ROCRpredTrain = prediction(predHeadlineWordsForest2[,2], HeadlineWordsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain, "auc")@y.values)
predHeadlineWordsForest3 <- predict(HeadlineWordsForest2, newdata= HeadlineWordsTest, type="prob")
predHeadlineWordsForest3[,2]
MySubmission3 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predHeadlineWordsForest3[,2])
write.csv(MySubmission3, "RandomForestheadline.csv", row.names=FALSE)
NewsTrain$Abstract[2000]
NewsTrain$Snippet[2000]
###
CorpusAbstract = Corpus(VectorSource(c(NewsTrain$Abstract, NewsTest$Abstract)))
# You can go through all of the standard pre-processing steps like we did in Unit 5:
CorpusAbstract = tm_map(CorpusAbstract, tolower)
# Remember this extra line is needed after running the tolower step:
CorpusAbstract = tm_map(CorpusAbstract, PlainTextDocument)
CorpusAbstract = tm_map(CorpusAbstract, removePunctuation)
CorpusAbstract = tm_map(CorpusAbstract, removeWords, stopwords("english"))
CorpusAbstract = tm_map(CorpusAbstract, stemDocument)
# Now we are ready to convert our corpus to a DocumentTermMatrix, remove sparse terms, and turn it into a data frame.
# We selected one particular threshold to remove sparse terms, but remember that you can try different numbers!
dtm = DocumentTermMatrix(CorpusAbstract)
sparse = removeSparseTerms(dtm, 0.995)
AbstractWords = as.data.frame(as.matrix(sparse))
# Let's make sure our variable names are okay for R:
colnames(AbstractWords) = make.names(colnames(AbstractWords))
# Now we need to split the observations back into the training set and testing set.
# To do this, we can use the head and tail functions in R.
# The head function takes the first "n" rows of AbstractWords (the first argument to the head function), where "n" is specified by the second argument to the head function.
# So here we are taking the first nrow(NewsTrain) observations from AbstractWords, and putting them in a new data frame called "AbstractWordsTrain"
AbstractWordsTrain = head(AbstractWords, nrow(NewsTrain))
# The tail function takes the last "n" rows of AbstractWords (the first argument to the tail function), where "n" is specified by the second argument to the tail function.
# So here we are taking the last nrow(NewsTest) observations from AbstractWords, and putting them in a new data frame called "AbstractWordsTest"
AbstractWordsTest = tail(AbstractWords, nrow(NewsTest))
summary(AbstractWordsTrain)
colnames(AbstractWordsTrain) <- paste("Ab", colnames(AbstractWordsTrain), sep = "_")
colnames(AbstractWordsTest) <- paste("Ab", colnames(AbstractWordsTest), sep = "_")
WordsTrain <- cbind(HeadlineWordsTrain , AbstractWordsTrain)
summary(WordsTrain)
WordsTest <- cbind(HeadlineWordsTest , AbstractWordsTest)
summary(WordsTest)
sort(colSums(subset(WordsTrain, Popular == 1)))
sort(colSums(subset(HeadlineWordsTrain, Popular == 1)))
sort(colSums(AbstractWordsTest))
sort(colSums(AbstractWordsTrain))
summary(HeadlineWordsTrain)
summary(HeadlineWordsTest)
HeadlineWordsTrain$AbstractWC = rowSums(AbstractWordsTrain)
HeadlineWordsTest$AbstractWC = rowSums(AbstractWordsTest)
hist(HeadlineWordsTrain$HeadlineWC)
hist(HeadlineWordsTest$AbstractWC )
WordsLog <- glm(Popular ~ Hour + AbstractWC + day+ fashion + morn + new + read + today + word + logWC + NewsDesk + Weekday + Ab_play + Ab_american + Ab_latest + Ab_live + Ab_music + Ab_recent + Ab_say + Ab_school + Ab_system + Ab_women +Ab_write+ Ab_reader + Ab_play, data= WordsTrain, family= binomial)
summary(WordsLog )
WordsTrain2 <- WordsTrain
WordsTrain2$Popular <- NewsTrain$Popular
WordsLog2 <- glm(Popular ~ . , data= WordsTrain2, family= binomial)
summary(WordsLog2 )
predWordsLog = predict(WordsLog, data= WordsTrain, type = "response" )
table(WordsTrain$Popular, predWordsLog > 0.5)
predWordsLog
ROCRpredTrain = prediction(predWordsLog, WordsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain, "auc")@y.values)
predWordsLog2 = predict(WordsLog2, data= WordsTrain, type = "response" )
table(WordsTrain$Popular, predWordsLog2 > 0.5)
ROCRpredTrain2 = prediction(predWordsLog2, WordsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain2, "auc")@y.values)
predWordsLog3 = predict(WordsLog, newdata= WordsTest, type = "response" )
predWordsLog3
MySubmission4 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predWordsLog3)
write.csv(MySubmission4, "LogRegressioncorpus.csv", row.names=FALSE)
# BUG FIX: the abstract-corpus columns carry an "Ab_" prefix (set via
# colnames(AbstractWordsTrain) <- paste("Ab", ..., sep = "_")), so the
# "Sub_*" terms used previously did not exist in WordsTrain and the call
# errored. The duplicated play term is also dropped.
WordsForest <- randomForest(as.factor(Popular) ~ music + recap + war + day + fashion + morn + new + read + today + word + WordCount + NewsDesk + Weekday + SubsectionName + SectionName + Ab_play + Ab_american + Ab_latest + Ab_live + Ab_music + Ab_recent + Ab_say + Ab_school + Ab_system + Ab_women + Ab_write + Ab_reader, data = WordsTrain, method = "class", ntree = 1000, nodesize = 15)
summary(WordsForest)
# In-sample class probabilities and confusion table at the 0.5 threshold
predWordsForest <- predict(WordsForest, data = WordsTrain, type = "prob")
table(WordsTrain$Popular, predWordsForest[, 2] > 0.5)
ROCRpredTrain <- prediction(predWordsForest[, 2], WordsTrain$Popular)
auc <- as.numeric(performance(ROCRpredTrain, "auc")@y.values)
WordsForest2 <- randomForest(as.factor(Popular) ~ . , data= WordsTrain, method= "class", ntree= 1000, nodesize= 15)
summary(WordsForest2 )
predWordsForest2 = predict(WordsForest2, data= WordsTrain, type = "prob" )
table(WordsTrain$Popular, predWordsForest2[,2] > 0.5)
ROCRpredTrain2 = prediction(predWordsForest2[,2], WordsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain2, "auc")@y.values)
predWordsForest3 = predict(WordsForest, newdata= WordsTest, type = "prob" )
MySubmission5 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predWordsForest3[,2])
write.csv(MySubmission5, "RandomForestcorpus.csv", row.names=FALSE)
# BUG FIX: abstract-term columns are prefixed "Ab_" (see the
# colnames(...) <- paste("Ab", ..., sep = "_") step), so the "Sub_*" names
# used previously did not exist in WordsTrain and rpart() errored out.
# The duplicated play term is also dropped.
WordsCART1 <- rpart(Popular ~ music + recap + war + day + fashion + morn + new + read + today + word + WordCount + NewsDesk + Weekday + SectionName + Ab_play + Ab_american + Ab_latest + Ab_live + Ab_music + Ab_recent + Ab_say + Ab_school + Ab_system + Ab_women + Ab_write + Ab_reader, data = WordsTrain, method = "class")
# In-sample class probabilities, tree plot, and confusion table at 0.5
predWordsCART1 <- predict(WordsCART1, data = WordsTrain, type = "prob")
prp(WordsCART1)
table(WordsTrain$Popular, predWordsCART1[, 2] > 0.5)
ROCRpredTrain <- prediction(predWordsCART1[, 2], WordsTrain$Popular)
auc <- as.numeric(performance(ROCRpredTrain, "auc")@y.values)
str(NewsTrain$Headline)
# Feature engineering based on common phrases, punctuation marks, etc.
# "Q. and A." contains regex metacharacters ('.' matches any character), so
# match it literally with fixed = TRUE as clearly intended.
NewsTrain$qna <- grepl("Q. and A.", NewsTrain$Headline, fixed = TRUE)
NewsTest$qna <- grepl("Q. and A.", NewsTest$Headline, fixed = TRUE)
NewsTrain$Headlineqmark <- grepl("\\?", NewsTrain$Headline)
NewsTest$Headlineqmark <- grepl("\\?", NewsTest$Headline)
NewsTrain$Abstractqmark <- grepl("\\?", NewsTrain$Abstract)
NewsTest$Abstractqmark <- grepl("\\?", NewsTest$Abstract)
# Unnecessary backslashes before ',', ''' and ':' were dropped; those
# characters are not regex metacharacters, so the matches are unchanged.
NewsTrain$weirdmark <- grepl("\\|", NewsTrain$Headline)
NewsTrain$comma <- grepl(",", NewsTrain$Headline)
NewsTrain$dollar <- grepl("\\$", NewsTrain$Headline)
NewsTest$dollar <- grepl("\\$", NewsTest$Headline)
NewsTrain$abdollar <- grepl("\\$", NewsTrain$Abstract)
NewsTest$abdollar <- grepl("\\$", NewsTest$Abstract)
HeadlineWordsTrain$abdollar <- NewsTrain$abdollar
HeadlineWordsTest$abdollar <- NewsTest$abdollar
NewsTrain$headlinequote <- grepl("'", NewsTrain$Headline)
NewsTrain$period <- grepl("\\.", NewsTrain$Headline)
NewsTrain$colon <- grepl(":", NewsTrain$Headline)
NewsTest$colon <- grepl(":", NewsTest$Headline)
NewsTrain$Abcolon <- grepl(":", NewsTrain$Abstract)
NewsTest$Abcolon <- grepl(":", NewsTest$Abstract)
# BUG FIX (ordering): these cross-tabs originally appeared *before*
# 'abdollar' and 'headlinequote' were created, so they failed in a fresh
# session; they are now run after the columns exist.
table(NewsTrain$Headlineqmark, NewsTrain$Popular)
table(NewsTrain$abdollar, NewsTrain$Popular)
table(NewsTrain$headlinequote, NewsTrain$Popular)
# BUG FIX (ordering): the originals copied NewsTrain$nineteen/eighteen into
# WordsTrain *before* those columns were created; create them first, then copy.
NewsTrain$nineteen <- grepl("19", NewsTrain$Headline)
NewsTrain$eighteen <- grepl("18", NewsTrain$Headline)
WordsTrain$nineteen <- NewsTrain$nineteen
WordsTrain$eighteen <- NewsTrain$eighteen
summary(NewsTrain)
NewsTest$nineteen <- grepl("19", NewsTest$Headline)
NewsTest$eighteen <- grepl("18", NewsTest$Headline)
summary(NewsTest)
WordsTest$nineteen <- NewsTest$nineteen
WordsTest$eighteen <- NewsTest$eighteen
WordsTest$eighteen  # quick interactive inspection of the copied column
NewsTrain$fourteen <- grepl("2014", NewsTrain$Headline)
NewsTest$fourteen <- grepl("2014", NewsTest$Headline)
grepl("\\$", NewsTrain$Headline)  # interactive inspection of the dollar flag
NewsTrain$OFC <- grepl("Open for Comments", NewsTrain$Headline)
NewsTest$OFC <- grepl("Open for Comments", NewsTest$Headline)
HeadlineWordsTrain$OFC <- NewsTrain$OFC
HeadlineWordsTest$OFC <- NewsTest$OFC
NewsTrain$Obama <- grepl("Obama", NewsTrain$Headline)
table(NewsTrain$Obama, NewsTrain$Popular)
NewsTrain$review <- grepl("review", NewsTrain$Headline)
table(NewsTrain$review, NewsTrain$Popular)
# NOTE(review): 'movie' uses the same pattern as 'review' -- looks like a
# copy-paste slip (perhaps "Movie" was intended); kept as-is to preserve
# behavior, but confirm before relying on this feature.
NewsTrain$movie <- grepl("review", NewsTrain$Headline)
table(NewsTrain$movie, NewsTrain$Popular)
# The stray "\\" before ordinary letters (e.g. "\\New York") was dropped
# throughout: the default TRE engine treats an escaped ordinary letter as
# that literal letter, so the matches are unchanged, but the escape would
# error under perl = TRUE and only obscures the intent.
NewsTrain$NY <- grepl("New York", NewsTrain$Headline)
NewsTest$NY <- grepl("New York", NewsTest$Headline)
HeadlineWordsTrain$NY <- NewsTrain$NY
HeadlineWordsTest$NY <- NewsTest$NY
# BUG FIX: the original first built 'comments' from the Headline and then
# immediately overwrote it with the Abstract-based flag; only the
# Abstract-based version (which matches NewsTest below) is kept.
NewsTrain$comments <- grepl("comments", NewsTrain$Abstract)
NewsTest$comments <- grepl("comments", NewsTest$Abstract)
NewsTrain$wordof <- grepl("Word of", NewsTrain$Headline)
NewsTrain$NYAbs <- grepl("New York", NewsTrain$Abstract)
NewsTrain$obama <- grepl("Obama", NewsTrain$Headline)
NewsTrain$abobama <- grepl("Obama", NewsTrain$Abstract)
NewsTrain$report <- grepl("Report", NewsTrain$Headline)
NewsTrain$today <- grepl("Today in", NewsTrain$Headline)
NewsTrain$ten <- grepl("10", NewsTrain$Headline)
NewsTrain$t <- grepl("T's", NewsTrain$Headline)
NewsTrain$A <- grepl("A ", NewsTrain$Headline)
NewsTrain$we <- grepl("What We", NewsTrain$Headline)
# BUG FIX: the original re-assigned NewsTest$comments from the *Headline*
# ("Comments"), leaving train (Abstract) and test (Headline) inconsistent;
# that line was dropped so both sets use the Abstract-based flag set above.
NewsTest$wordof <- grepl("Word of", NewsTest$Headline)
NewsTest$NYAbs <- grepl("New York", NewsTest$Abstract)
NewsTest$obama <- grepl("Obama", NewsTest$Headline)
NewsTest$abobama <- grepl("Obama", NewsTest$Abstract)
NewsTest$report <- grepl("Report", NewsTest$Headline)
NewsTest$today <- grepl("Today in", NewsTest$Headline)
NewsTest$ten <- grepl("10", NewsTest$Headline)
NewsTest$t <- grepl("T's", NewsTest$Headline)
NewsTest$A <- grepl("A ", NewsTest$Headline)
NewsTest$we <- grepl("What We", NewsTest$Headline)
NewsTrain$Verbatim <- grepl("Verbatim", NewsTrain$Headline)
NewsTest$Verbatim <- grepl("Verbatim", NewsTest$Headline)
HeadlineWordsTrain$Verbatim <- NewsTrain$Verbatim
HeadlineWordsTest$Verbatim <- NewsTest$Verbatim
# BUG FIX (ordering): 'picture' was originally tabled before it existed;
# the cross-tab now follows the column's creation.
NewsTrain$picture <- grepl("Pictures of", NewsTrain$Headline)
NewsTest$picture <- grepl("Pictures of", NewsTest$Headline)
table(NewsTrain$picture, NewsTrain$Popular)
HeadlineWordsTrain$picture <- NewsTrain$picture
HeadlineWordsTest$picture <- NewsTest$picture
NewsTrain$six <- grepl("6 Q", NewsTrain$Headline)
NewsTest$six <- grepl("6 Q", NewsTest$Headline)
HeadlineWordsTrain$six <- NewsTrain$six
HeadlineWordsTest$six <- NewsTest$six
# BUG FIX (ordering): 'daily' is created on NewsTrain/NewsTest before being
# copied into the word data frames (the original copied first).
NewsTrain$daily <- grepl("Daily", NewsTrain$Headline)
NewsTest$daily <- grepl("Daily", NewsTest$Headline)
HeadlineWordsTrain$daily <- NewsTrain$daily
HeadlineWordsTest$daily <- NewsTest$daily
NewsTrain$DC <- grepl("Daily Clip", NewsTrain$Headline)
NewsTest$DC <- grepl("Daily Clip", NewsTest$Headline)
NewsTrain$test <- grepl("Test Yourself", NewsTrain$Headline)
NewsTest$test <- grepl("Test Yourself", NewsTest$Headline)
NewsTrain$DR <- grepl("Daily Report", NewsTrain$Headline)
NewsTest$DR <- grepl("Daily Report", NewsTest$Headline)
NewsTrain$Ask <- grepl("Ask Well", NewsTrain$Headline)
NewsTest$Ask <- grepl("Ask Well", NewsTest$Headline)
NewsTrain$nc <- grepl("No Comment", NewsTrain$Headline)
NewsTest$nc <- grepl("No Comment", NewsTest$Headline)
NewsTrain$China <- grepl("China", NewsTrain$Headline)
NewsTest$China <- grepl("China", NewsTest$Headline)
NewsTrain$Chinese <- grepl("Chinese", NewsTrain$Headline)
NewsTest$Chinese <- grepl("Chinese", NewsTest$Headline)
# BUG FIX (ordering): Ferguson flags are defined before being copied (the
# original copied 'ferg' into WordsTrain/WordsTest first). Redundant "\\"
# escapes before ordinary letters were dropped (same matches under TRE).
NewsTrain$ferg <- grepl("Ferguson", NewsTrain$Headline)
NewsTest$ferg <- grepl("Ferguson", NewsTest$Headline)
WordsTrain$ferg <- NewsTrain$ferg
WordsTest$ferg <- NewsTest$ferg
NewsTrain$abferg <- grepl("Ferguson", NewsTrain$Abstract)
NewsTest$abferg <- grepl("Ferguson", NewsTest$Abstract)
WordsTrain$abferg <- NewsTrain$abferg
WordsTest$abferg <- NewsTest$abferg
NewsTrain$ebola <- grepl("Ebola", NewsTrain$Headline)
NewsTest$ebola <- grepl("Ebola", NewsTest$Headline)
# BUG FIX (ordering): 'abebola' is created before being copied into the
# headline-word data frames (the original copied first).
NewsTrain$abebola <- grepl("Ebola", NewsTrain$Abstract)
NewsTest$abebola <- grepl("Ebola", NewsTest$Abstract)
HeadlineWordsTrain$abebola <- NewsTrain$abebola
HeadlineWordsTest$abebola <- NewsTest$abebola
NewsTrain$abuber <- grepl("Uber", NewsTrain$Abstract)
NewsTest$abuber <- grepl("Uber", NewsTest$Abstract)
NewsTrain$abpuzzle <- grepl("puzzle", NewsTrain$Abstract)
NewsTest$abpuzzle <- grepl("puzzle", NewsTest$Abstract)
NewsTrain$abreader <- grepl("reader", NewsTrain$Abstract)
NewsTest$abreader <- grepl("reader", NewsTest$Abstract)
NewsTrain$abreaders <- grepl("readers", NewsTrain$Abstract)
NewsTest$abreaders <- grepl("readers", NewsTest$Abstract)
NewsTrain$pres <- grepl("president", NewsTrain$Abstract)
NewsTest$pres <- grepl("president", NewsTest$Abstract)
NewsTrain$abTimes <- grepl("Times", NewsTrain$Abstract)
NewsTest$abTimes <- grepl("Times", NewsTest$Abstract)
NewsTrain$Times <- grepl("Times", NewsTrain$Headline)
NewsTest$Times <- grepl("Times", NewsTest$Headline)
summary(NewsTrain)
table(NewsTrain$abuber, NewsTrain$Popular)
HeadlineWordsTrain$abuber <- NewsTrain$abuber
HeadlineWordsTest$abuber <- NewsTest$abuber
NewsTrain$uber <- grepl("Uber", NewsTrain$Headline)
NewsTest$uber <- grepl("Uber", NewsTest$Headline)
NewsTrain$The <- grepl("The", NewsTrain$Headline)
NewsTest$The <- grepl("The", NewsTest$Headline)
HeadlineWordsTrain$uber <- NewsTrain$uber
HeadlineWordsTest$uber <- NewsTest$uber
# More keyword flags on Headline/Abstract, plus cross-tabs against Popular.
NewsTrain$Facebook <- grepl("Facebook", NewsTrain$Headline)
NewsTest$Facebook <- grepl("Facebook", NewsTest$Headline)
NewsTrain$RR <- grepl("Readers Respond", NewsTrain$Headline)
NewsTest$RR <- grepl("Readers Respond", NewsTest$Headline)
NewsTrain$yt <- grepl("Your Turn", NewsTrain$Headline)
NewsTest$yt <- grepl("Your Turn", NewsTest$Headline)
NewsTrain$fashion <- grepl("Fashion", NewsTrain$Headline)
NewsTest$fashion <- grepl("Fashion", NewsTest$Headline)
NewsTrain$abfashion <- grepl("fashion", NewsTrain$Abstract)
NewsTest$abfashion <- grepl("fashion", NewsTest$Abstract)
NewsTrain$abRR <- grepl("Readers", NewsTrain$Abstract)
NewsTest$abRR <- grepl("Readers", NewsTest$Abstract)
NewsTrain$senator <- grepl("Senator", NewsTrain$Abstract)
NewsTest$senator <- grepl("Senator", NewsTest$Abstract)
NewsTrain$eu <- grepl("Euro", NewsTrain$Headline)
NewsTest$eu <- grepl("Euro", NewsTest$Headline)
NewsTrain$music <- grepl("Music", NewsTrain$Headline)
NewsTest$music <- grepl("Music", NewsTest$Headline)
NewsTrain$abmusic <- grepl("music", NewsTrain$Abstract)
NewsTest$abmusic <- grepl("music", NewsTest$Abstract)
NewsTrain$War <- grepl("War", NewsTrain$Headline)
NewsTest$War <- grepl("War", NewsTest$Headline)
NewsTrain$abwar <- grepl("war", NewsTrain$Abstract)
NewsTest$abwar <- grepl("war", NewsTest$Abstract)
summary(NewsTrain)
table(NewsTrain$ferg, NewsTrain$Popular)
table(NewsTrain$ebola, NewsTrain$Popular)
HeadlineWordsTrain$nc <- NewsTrain$nc
HeadlineWordsTest$nc <- NewsTest$nc
# Redundant "\\" escapes before ordinary letters (Recap/Apple) and before
# '!' and ',' were dropped; none of these are regex metacharacters under the
# default TRE engine, so the matches are unchanged.
NewsTrain$recap <- grepl("Recap", NewsTrain$Headline)
NewsTrain$facts <- grepl("Facts", NewsTrain$Headline)
NewsTrain$morning <- grepl("Morning Agenda", NewsTrain$Headline)
NewsTrain$friday <- grepl("Friday Night", NewsTrain$Headline)
NewsTrain$apple <- grepl("Apple", NewsTrain$Headline)
NewsTrain$quandary <- grepl("Weekly Quandary", NewsTrain$Headline)
NewsTrain$why <- grepl("Why", NewsTrain$Headline)
NewsTrain$excl <- grepl("!", NewsTrain$Headline, fixed = TRUE)
# Combined flag for headline types that explicitly invite reader comments.
# NOTE(review): 'posvar' is only built for the training set -- add the
# NewsTest equivalent before using it in any model.
NewsTrain$posvar <- NewsTrain$quandary | NewsTrain$facts | NewsTrain$nc | NewsTrain$Ask
NewsTest$recap <- grepl("Recap", NewsTest$Headline)
NewsTest$facts <- grepl("Facts", NewsTest$Headline)
NewsTest$morning <- grepl("Morning Agenda", NewsTest$Headline)
NewsTest$friday <- grepl("Friday Night", NewsTest$Headline)
NewsTest$apple <- grepl("Apple", NewsTest$Headline)
NewsTest$quandary <- grepl("Weekly Quandary", NewsTest$Headline)
NewsTest$why <- grepl("Why", NewsTest$Headline)
NewsTest$excl <- grepl("!", NewsTest$Headline, fixed = TRUE)
table(NewsTrain$recap, NewsTrain$Popular)
table(NewsTrain$excl, NewsTrain$Popular)
table(grepl("Open for Comments", NewsTest$Headline))
# BUG FIX: startsWith() is a base-R function (R >= 3.3), not a package --
# the original install.packages("startsWith") and library(startsWith)
# calls both fail, so they were removed.
# The "=NewsTrain$..." lines below were syntax errors (a stray leading "="
# left over from an edit); they are kept as plain inspection expressions.
NewsTrain$recap
NewsTrain$facts
NewsTrain$morning
NewsTrain$friday
NewsTrain$apple
NewsTrain$quandary
NewsTrain$why
NewsTrain$excl
NewsTest$recap
NewsTest$facts
NewsTest$morning
NewsTest$friday
NewsTest$apple
NewsTest$quandary
NewsTest$why
NewsTest$excl
# BUG FIX: base::startsWith() takes only (x, prefix); the original's 'trim'
# and 'ignore.case' arguments do not exist, and the prefix must be a quoted
# string (bare 'A' would be an undefined object).
startsWith(NewsTrain$Headline, "A")
summary(NewsTrain)
summary(NewsTest)
# BUG FIX (ordering): the word-count columns must exist before the subset
# and histograms that use them, so they are computed first.
# gregexpr("\\W+", ...) counts runs of non-word characters, giving an
# approximate token count per string; vapply replaces sapply for a
# guaranteed integer result.
HeadlineWordsTrain$AbstractWordCount <- vapply(gregexpr("\\W+", NewsTrain$Abstract), length, integer(1))
HeadlineWordsTrain$HeadlineWordCount <- vapply(gregexpr("\\W+", NewsTrain$Headline), length, integer(1))
# BUG FIX: Popular here comes straight from NewsTrain$Popular (0/1), so the
# original 'Popular == "Yes"' matched nothing and produced an empty subset;
# compare against 1 instead.
pop <- subset(HeadlineWordsTrain, Popular == 1)
summary(pop)
hist(pop$AbstractWordCount)
hist(HeadlineWordsTrain$AbstractWC)
table(NewsTrain$NY, NewsTrain$Popular)
hist(log(1 + NewsTrain$HeadlineWC))
hist(HeadlineWordsTrain$HeadlineWordCount)
| /Final script.R | no_license | mananshah1/Kaggle-NY-times | R | false | false | 38,133 | r | # KAGGLE COMPETITION - GETTING STARTED
# This script file is intended to help you get started on the Kaggle platform, and to show you how to make a submission to the competition.
summary(HeadlineWordsTest)
# Let's start by reading the data into R
# Make sure you have downloaded these files from the Kaggle website, and have navigated to the directory where you saved the files on your computer
# We are adding in the argument stringsAsFactors=FALSE, since we have some text fields
NewsTrain = read.csv("NYTimesBlogTrain.csv", stringsAsFactors=FALSE)
str(NewsTrain)
summary(NewsTrain)
NewsTest = read.csv("NYTimesBlogTest.csv", stringsAsFactors=FALSE)
table(NewsTrain$RR, NewsTrain$Popular)
hist(log(40+NewsTest$WordCount))
NewsTrain$logWC = log(40+NewsTrain$WordCount)
NewsTest$logWC = log(40+NewsTest$WordCount)
HeadlineWordsTest$logWC = NewsTest$logWC
HeadlineWordsTrain$logWC = NewsTrain$logWC
summary(HeadlineWordsTrain)
NewsTrain$AbstractWordCount <- HeadlineWordsTrain$AbstractWordCount
NewsTrain$HeadlineWordCount <- HeadlineWordsTrain$HeadlineWordCount
NewsTrain$AbstractWC <- HeadlineWordsTrain2$AbstractWC
NewsTrain$HeadlineWC <- HeadlineWordsTrain2$HeadlineWC
HeadlineWordsTrain2$HeadlineWC
summary(HeadlineWordsTest)
HeadlineWordsTest$HeadlineWC
HeadlineWordsTrain$HeadlineWC
NewsTest$AbstractWC <- HeadlineWordsTest$AbstractWC
NewsTest$HeadlineWC <- HeadlineWordsTest$HeadlineWC
# We will just create a simple logistic regression model, to predict Popular using WordCount:
SimpleMod = glm(Popular ~ music + abmusic + War + abwar + friday + fashion + abfashion + eu + abRR + comments + RR + Times + pres + abreader+ abreaders + abpuzzle + The + Facebook + qna + obama + nc + apple + recap + fourteen + excl + ten + abferg + Facebook + senator + NYAbs+ test + Ask + morning + we + senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + HeadlineWC + AbstractWC + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, data=NewsTrain, family=binomial)
summary(SimpleMod)
predictionLog <- predict(SimpleMod, type = "response")
table(predictionLog > 0.5, NewsTrain$Popular)
ROCRpredTrain = prediction(predictionLog, NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain, "auc")@y.values)
predictionLogTest <- predict(SimpleMod, newdata= NewsTest, type = "response")
table(predictionLogTest >0.5)
MySubmission2 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictionLogTest)
write.csv(MySubmission2, "Logultimateallvaryt.csv", row.names=FALSE)
MySubmission2 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictionLogTest)
write.csv(MySubmission2, "Logultimateallvarmusicwar.csv", row.names=FALSE)
SimpleCART = rpart(Popular ~senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + HeadlineWC + AbstractWC + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, data=NewsTrain, minbucket=20, method="class")
prp(SimpleCART)
predictionCART <- predict(SimpleCART, type = "class")
table(predictionCART, NewsTrain$Popular)
# And then make predictions on the test set:
library(ROCR)
ROCRpredTrain = prediction(predictionCART, NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain, "auc")@y.values)
predictionLogTest <- predict(SimpleMod, newdata= NewsTest, type = "response")
table(predictionLogTest> 0.5)
library(rpart)
library(rpart.plot)
TrainCART <- rpart(Popular ~ WordCount + NewsDesk + Weekday + SectionName, data=NewsTrain, method= "class")
prp(TrainCART)
predictionCART <- predict(TrainCART, type = "prob")
predictionCART
table(predictionCART[,2] > 0.5, NewsTrain$Popular)
predictionCART2 <- predict(TrainCART, type = "class")
table(predictionCART2 , NewsTrain$Popular)
summary(predictionCART2)
ROCRpredTrainCART = prediction(predictionCART[,2], NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrainCART, "auc")@y.values)
library(randomForest)
SimpleRF = randomForest(as.factor(Popular) ~ music + abmusic + War + abwar + friday+ fashion + abfashion + China + Chinese + yt + SectionName + eu + abRR + comments + RR + Times + pres + abreader+ abreaders + abpuzzle + The + Facebook + qna + obama + nc + apple + recap + fourteen + excl + ten + abferg + Facebook + senator + NYAbs+ test + Ask + morning + we + senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + HeadlineWC + AbstractWC + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, data=NewsTrain, method="class" , ntree=1000, nodesize=10)
varImpPlot(SimpleRF)
predictionSimpleRF <- predict(SimpleRF, type = "prob")
table(predictionSimpleRF[,2] > 0.5, NewsTrain$Popular)
ROCRpredTrainSimpleRF = prediction(predictionSimpleRF[,2], NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrainSimpleRF, "auc")@y.values)
predictionSimpleRFTest <- predict(SimpleRF, newdata= NewsTest, type = "prob")
table(predictionSimpleRFTest[,2]> 0.5)
MySubmission2 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictionSimpleRFTest[,2])
write.csv(MySubmission2, "SimpleRF2.csv", row.names=FALSE)
SimpleRF2 = randomForest(as.factor(Popular) ~ music + abmusic + War + abwar + friday+ fashion + abfashion + China + Chinese + yt + SectionName + eu + abRR + comments + RR + Times + pres + abreader+ abreaders + abpuzzle + The + Facebook + qna + obama + nc + apple + recap + fourteen + excl + ten + abferg + Facebook + senator + NYAbs+ test + Ask + morning + we + senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, data=NewsTrain, method="class" , ntree=1000, nodesize=10)
varImpPlot(SimpleRF2)
predictionSimpleRF2 <- predict(SimpleRF2, type = "prob")
table(predictionSimpleRF2[,2] > 0.5, NewsTrain$Popular)
ROCRpredTrainSimpleRF2 = prediction(predictionSimpleRF2[,2], NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrainSimpleRF2, "auc")@y.values)
predictionSimpleRF2Test <- predict(SimpleRF2, newdata= NewsTest, type = "prob")
table(predictionSimpleRF2Test[,2]> 0.5)
MySubmission2 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictionSimpleRF2Test[,2])
write.csv(MySubmission2, "SimpleRFnoabstractheadlinewc.csv", row.names=FALSE)
SimpleRF3 = randomForest(as.factor(Popular) ~ SectionName + eu + abRR + comments + RR + Times + pres + abreader+ abreaders + abpuzzle + The + Facebook + qna + obama + nc + apple + recap + fourteen + excl + ten + abferg + Facebook + senator + NYAbs+ test + Ask + morning + we + senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, data=NewsTrain, method="class" , ntree=1000, nodesize=10)
varImpPlot(SimpleRF3)
predictionSimpleRF3 <- predict(SimpleRF3, type = "prob")
table(predictionSimpleRF3[,2] > 0.5, NewsTrain$Popular)
ROCRpredTrainSimpleRF3 = prediction(predictionSimpleRF3[,2], NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrainSimpleRF3, "auc")@y.values)
predictionSimpleRF3Test <- predict(SimpleRF3, newdata= NewsTest, type = "prob")
MySubmission3 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictionSimpleRF3Test[,2])
write.csv(MySubmission2, "SimpleRFnoabstractheadlinewcshort.csv", row.names=FALSE)
summary(NewsTrain2)
NewsTrain2 <- NewsTrain
NewsTrain2 <- NewsTrain
for(i in 1:6532)
{if(NewsTrain2$Popular[i] == 1) NewsTrain2$Popular[i] = "Yes" else NewsTrain2$Popular[i] = "No"}
trgbm = train(as.factor(Popular) ~ music + abmusic + War + abwar + friday+ fashion + abfashion+ China + Chinese + yt + SectionName + eu + abRR + comments + RR + Times + pres + abreader+ abreaders + abpuzzle + The + Facebook + qna + obama + nc + apple + recap + fourteen + excl + ten + abferg + Facebook + senator + NYAbs+ test + Ask + morning + we + senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + HeadlineWC + AbstractWC + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, NewsTrain2, method="gbm", distribution= "bernoulli", metric="ROC", trControl=fitControl, verbose= FALSE)
predictiongbm <- predict(trgbm, NewsTrain2, type = "prob")
table(predictiongbm[,2] > 0.5, NewsTrain$Popular)
ROCRpredTraingbm = prediction(predictiongbm[,2], NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTraingbm, "auc")@y.values)
trgbm2 = train(as.factor(Popular) ~ music + abmusic + War + abwar + friday+ fashion + abfashion+ China + Chinese + yt + SectionName + eu + abRR + comments + RR + Times + pres + abreader+ abreaders + abpuzzle + The + Facebook + qna + obama + nc + apple + recap + fourteen + excl + ten + abferg + Facebook + senator + NYAbs+ test + Ask + morning + we + senator + Facebook + uber + abuber + abebola + abobama + abdollar + ebola + ferg + wordof + report + today + daily + qna + quandary + facts + excl +eighteen + nineteen + OFC + six + Verbatim + picture + Abstractqmark+ NY + Headlineqmark + logWC + NewsDesk + Weekday + SubsectionName + Hour, NewsTrain2, method="gbm", distribution= "bernoulli", metric="ROC", trControl=fitControl, verbose= FALSE)
predictiongbm2 <- predict(trgbm2, NewsTrain2, type = "prob")
predictiongbm <- predict(trgbm, NewsTrain2, type = "prob")
predictiongbmTest <- predict(trgbm, NewsTest, type = "prob")
predictiongbmTest2 <- predict(trgbm2, NewsTest, type = "prob")
predictiongbmTest
table(predictiongbmTest[,2]> 0.5)
MySubmission2 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictiongbmTest[,2])
write.csv(MySubmission2, "Simplegbm2.csv", row.names=FALSE)
MySubmission3 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictiongbmTest2[,2])
write.csv(MySubmission3, "Simplegbmnowc.csv", row.names=FALSE)
NewsTrain$SectionName = as.factor(NewsTrain$SectionName)
NewsTrain$NewsDesk = as.factor(NewsTrain$NewsDesk)
NewsTrain$SubsectionName = as.factor(NewsTrain$SubsectionName)
NewsTest$SectionName = factor(NewsTest$SectionName, levels= levels(NewsTrain$SectionName))
NewsTest$NewsDesk = factor(NewsTest$NewsDesk, levels= levels(NewsTrain$NewsDesk))
NewsTest$SubsectionName = factor(NewsTest$SubsectionName, levels= levels(NewsTrain$SubsectionName))
summary(NewsTrain)
summary(NewsTest)
table(NewsTrain$SectionName)
TrainForest <- randomForest(as.factor(Popular) ~ WordCount + NewsDesk + Weekday + SectionName, data=NewsTrain, method= "class")
predictionForest <- predict(TrainForest, type = "prob")
predictionForest
table(predictionForest[,2] > 0.5, NewsTrain$Popular)
ROCRpredTrainForest = prediction(predictionForest[,2], NewsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrainForest, "auc")@y.values)
predictionForest2 <- predict(TrainForest, newdata= NewsTest, type = "prob")
predictionForest2[,2]
MySubmission2 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predictionForest2[,2])
write.csv(MySubmission2, "RandomForestfirst.csv", row.names=FALSE)
# We can't compute the accuracy or AUC on the test set ourselves, since we don't have the dependent variable on the test set (you can compute it on the training set though!).
# However, you can submit the file on Kaggle to see how well the model performs. You can make up to 5 submissions per day, so don't hesitate to just upload a solution to see how you did.
# Let's prepare a submission file for Kaggle (for more about this, see the "Evaluation" page on the competition site):
predTest <- predict(SimpleMod, newdata= NewsTest, type = "response")
MySubmission = data.frame(UniqueID = NewsTest$UniqueID, Probability1 = PredTest)
write.csv(MySubmission, "SubmissionSimpleLog.csv", row.names=FALSE)
# You should upload the submission "SubmissionSimpleLog.csv" on the Kaggle website to use this as a submission to the competition
# This model was just designed to help you get started - to do well in the competition, you will need to build better models!
# One more helpful hint:
# This dataset has a date/time field (PubDate). You might remember dealing with date and time data in some of the Unit 1 homework problems.
# In this dataset, the following commands might be useful to you when trying to get date and time variables.
# To convert the date/time to something R will understand, you can use the following commands:
NewsTrain$PubDate = strptime(NewsTrain$PubDate, "%Y-%m-%d %H:%M:%S")
NewsTest$PubDate = strptime(NewsTest$PubDate, "%Y-%m-%d %H:%M:%S")
summary(NewsTrain)
summary(NewsTrain$PubDate)
# The second argument tells the strptime function how the data is formatted.
# If you opened the file in Excel or another spreadsheet software before loading it into R, you might have to adjust the format.
# See the help page ?strptime for more information.
# Now that R understands this field, there are many different attributes of the date and time that you can extract.
# For example, you can add a variable to your datasets called "Weekday" that contains the day of the week that the article was published (0 = Sunday, 1 = Monday, etc.), by using the following commands:
NewsTrain$Weekday = NewsTrain$PubDate$wday
NewsTest$Weekday = NewsTest$PubDate$wday
NewsTrain$Hour = NewsTrain$PubDate$hour
NewsTest$Hour = NewsTest$PubDate$hour
NewsTrain$Minute = NewsTrain$PubDate$min
NewsTest$Minute = NewsTest$PubDate$min
NewsTrain$Second = NewsTrain$PubDate$sec
NewsTest$Second = NewsTest$PubDate$sec
summary(NewsTest)
table(NewsTrain$Second)
table(NewsTrain$Weekday)
# Weekday could now be used as an independent variable in your predictive models.
# For more fields that you can extract from a date/time object in R, see the help page ?POSIXlt
library(tm)
library(SnowballC)
# Then create a corpus from the headline variable. You can use other variables in the dataset for text analytics, but we will just show you how to use this particular variable.
# Note that we are creating a corpus out of the training and testing data.
CorpusHeadline = Corpus(VectorSource(c(NewsTrain$Headline, NewsTest$Headline)))
# You can go through all of the standard pre-processing steps like we did in Unit 5:
CorpusHeadline = tm_map(CorpusHeadline, tolower)
# Remember this extra line is needed after running the tolower step:
CorpusHeadline = tm_map(CorpusHeadline, PlainTextDocument)
CorpusHeadline = tm_map(CorpusHeadline, removePunctuation)
CorpusHeadline = tm_map(CorpusHeadline, removeWords, stopwords("english"))
CorpusHeadline = tm_map(CorpusHeadline, stemDocument)
# Now we are ready to convert our corpus to a DocumentTermMatrix, remove sparse terms, and turn it into a data frame.
# We selected one particular threshold to remove sparse terms, but remember that you can try different numbers!
dtm = DocumentTermMatrix(CorpusHeadline)
sparse = removeSparseTerms(dtm, 0.995)
HeadlineWords = as.data.frame(as.matrix(sparse))
# Let's make sure our variable names are okay for R:
colnames(HeadlineWords) = make.names(colnames(HeadlineWords))
# Now we need to split the observations back into the training set and testing set.
# To do this, we can use the head and tail functions in R.
# The head function takes the first "n" rows of HeadlineWords (the first argument to the head function), where "n" is specified by the second argument to the head function.
# So here we are taking the first nrow(NewsTrain) observations from HeadlineWords, and putting them in a new data frame called "HeadlineWordsTrain"
HeadlineWordsTrain = head(HeadlineWords, nrow(NewsTrain))
# The tail function takes the last "n" rows of HeadlineWords (the first argument to the tail function), where "n" is specified by the second argument to the tail function.
# So here we are taking the last nrow(NewsTest) observations from HeadlineWords, and putting them in a new data frame called "HeadlineWordsTest"
HeadlineWordsTest = tail(HeadlineWords, nrow(NewsTest))
# Note that this split of HeadlineWords works to properly put the observations back into the training and testing sets, because of how we combined them together when we first made our corpus.
# Before building models, we want to add back the original variables from our datasets. We'll add back the dependent variable to the training set, and the WordCount variable to both datasets. You might want to add back more variables to use in your model - we'll leave this up to you!
HeadlineWordsTrain$Popular = NewsTrain$Popular
HeadlineWordsTrain$WordCount = NewsTrain$WordCount
HeadlineWordsTest$WordCount = NewsTest$WordCount
HeadlineWordsTrain$Weekday = NewsTrain$Weekday
HeadlineWordsTest$Weekday = NewsTest$Weekday
HeadlineWordsTrain$Hour= NewsTrain$Hour
HeadlineWordsTest$Hour = NewsTest$Hour
HeadlineWordsTrain$NewsDesk = NewsTrain$NewsDesk
HeadlineWordsTest$NewsDesk = NewsTest$NewsDesk
HeadlineWordsTrain$SectionName = NewsTrain$SectionName
HeadlineWordsTest$SectionName = NewsTest$SectionName
HeadlineWordsTrain$SubsectionName = NewsTrain$SectionName
HeadlineWordsTest$SubsectionName = NewsTest$SectionName
HeadlineWords$Popular = NewsTrain$Popular
sort(colSums(subset(HeadlineWordsTrain, Popular == 1)))
sort(colSums(HeadlineWordsTest))
HeadlineWordsTrain$HeadlineWC = rowSums(HeadlineWordsTrain)
HeadlineWordsTest$HeadlineWC = rowSums(HeadlineWordsTest)
sort(colSums(HeadlineWordsTrain))
summary(HeadlineWordsTest)
str(HeadlineWordsTrain)
summary(HeadlineWordsTrain)
HeadlineWordsTrain2 = HeadlineWordsTrain
HeadlineWordsTrain2$Popular = NewsTrain$Popular
summary(HeadlineWordsTrain2)
table(HeadlineWordsTrain$Popular)
table(NewsTrain$Popular)
HeadlineWordsLog <- glm(Popular ~ AbstractWordCount + HeadlineWordCount + appl + ebola + obama + day+ fashion + morn + new + read + today + word + logWC + NewsDesk + Weekday + SectionName + SubsectionName + Hour , data= HeadlineWordsTrain2, family= binomial)
summary(HeadlineWordsLog )
HeadlineWordsLog2 <- glm(Popular ~ . , data= HeadlineWordsTrain, family= binomial)
summary(HeadlineWordsLog2)
predHeadlineWordsLog <- predict(HeadlineWordsLog , type = "response")
table(predHeadlineWordsLog > 0.5, HeadlineWordsTrain$Popular)
ROCRpredTrain = prediction(predHeadlineWordsLog2, HeadlineWordsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain, "auc")@y.values)
HeadlineWordsForest <- randomForest(as.factor(Popular) ~ . , data= HeadlineWordsTrain, method= "class")
predHeadlineWordsForest <- predict(HeadlineWordsForest, type="prob")
table(predHeadlineWordsForest[,2] > 0.5, HeadlineWordsTrain$Popular)
ROCRpredTrain = prediction(predHeadlineWordsForest[,2], HeadlineWordsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain, "auc")@y.values)
HeadlineWordsForest2 <- randomForest(as.factor(Popular) ~ music + recap + war + day+ fashion + morn + new + read + today + word + WordCount + NewsDesk + Weekday + SectionName + SubsectionName, data= HeadlineWordsTrain, method= "class")
predHeadlineWordsForest2 <- predict(HeadlineWordsForest2, type="prob")
table(predHeadlineWordsForest2[,2] > 0.5, HeadlineWordsTrain$Popular)
ROCRpredTrain = prediction(predHeadlineWordsForest2[,2], HeadlineWordsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain, "auc")@y.values)
predHeadlineWordsForest3 <- predict(HeadlineWordsForest2, newdata= HeadlineWordsTest, type="prob")
predHeadlineWordsForest3[,2]
MySubmission3 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predHeadlineWordsForest3[,2])
write.csv(MySubmission3, "RandomForestheadline.csv", row.names=FALSE)
NewsTrain$Abstract[2000]
NewsTrain$Snippet[2000]
# ---- Bag-of-words features from the article Abstracts (tm pipeline) ----
###
# Build one corpus over train + test so both share the same vocabulary.
CorpusAbstract = Corpus(VectorSource(c(NewsTrain$Abstract, NewsTest$Abstract)))
# You can go through all of the standard pre-processing steps like we did in Unit 5:
# NOTE(review): with tm >= 0.6 this should be
# tm_map(CorpusAbstract, content_transformer(tolower)); the
# PlainTextDocument workaround below suggests an older tm version — confirm
# before re-running on a current installation.
CorpusAbstract = tm_map(CorpusAbstract, tolower)
# Remember this extra line is needed after running the tolower step:
CorpusAbstract = tm_map(CorpusAbstract, PlainTextDocument)
CorpusAbstract = tm_map(CorpusAbstract, removePunctuation)
CorpusAbstract = tm_map(CorpusAbstract, removeWords, stopwords("english"))
CorpusAbstract = tm_map(CorpusAbstract, stemDocument)
# Now we are ready to convert our corpus to a DocumentTermMatrix, remove sparse terms, and turn it into a data frame.
# We selected one particular threshold to remove sparse terms, but remember that you can try different numbers!
dtm = DocumentTermMatrix(CorpusAbstract)
# Keep terms appearing in at least 0.5% of documents.
sparse = removeSparseTerms(dtm, 0.995)
AbstractWords = as.data.frame(as.matrix(sparse))
# Let's make sure our variable names are okay for R:
colnames(AbstractWords) = make.names(colnames(AbstractWords))
# Now we need to split the observations back into the training set and testing set.
# To do this, we can use the head and tail functions in R.
# The head function takes the first "n" rows of AbstractWords (the first argument to the head function), where "n" is specified by the second argument to the head function.
# So here we are taking the first nrow(NewsTrain) observations from AbstractWords, and putting them in a new data frame called "AbstractWordsTrain"
AbstractWordsTrain = head(AbstractWords, nrow(NewsTrain))
# The tail function takes the last "n" rows of AbstractWords (the first argument to the tail function), where "n" is specified by the second argument to the tail function.
# So here we are taking the last nrow(NewsTest) observations from AbstractWords, and putting them in a new data frame called "AbstractWordsTest"
AbstractWordsTest = tail(AbstractWords, nrow(NewsTest))
summary(AbstractWordsTrain)
# Prefix abstract-derived columns with "Ab_" so they do not collide with the
# headline-derived word columns when the frames are cbind-ed later.
colnames(AbstractWordsTrain) <- paste("Ab", colnames(AbstractWordsTrain), sep = "_")
colnames(AbstractWordsTest) <- paste("Ab", colnames(AbstractWordsTest), sep = "_")
# ---- Combine headline + abstract features; logistic regression models ----
WordsTrain <- cbind(HeadlineWordsTrain , AbstractWordsTrain)
summary(WordsTrain)
WordsTest <- cbind(HeadlineWordsTest , AbstractWordsTest)
summary(WordsTest)
# Word frequencies among popular articles, for feature screening.
sort(colSums(subset(WordsTrain, Popular == 1)))
sort(colSums(subset(HeadlineWordsTrain, Popular == 1)))
sort(colSums(AbstractWordsTest))
sort(colSums(AbstractWordsTrain))
summary(HeadlineWordsTrain)
summary(HeadlineWordsTest)
# Total abstract word counts as an extra numeric feature.
HeadlineWordsTrain$AbstractWC = rowSums(AbstractWordsTrain)
HeadlineWordsTest$AbstractWC = rowSums(AbstractWordsTest)
hist(HeadlineWordsTrain$HeadlineWC)
hist(HeadlineWordsTest$AbstractWC )
# Hand-picked logistic regression. `Hour` and `logWC` are assumed to have
# been derived earlier in the script — TODO confirm they exist at this point.
WordsLog <- glm(Popular ~ Hour + AbstractWC + day+ fashion + morn + new + read + today + word + logWC + NewsDesk + Weekday + Ab_play + Ab_american + Ab_latest + Ab_live + Ab_music + Ab_recent + Ab_say + Ab_school + Ab_system + Ab_women +Ab_write+ Ab_reader + Ab_play, data= WordsTrain, family= binomial)
summary(WordsLog )
# Kitchen-sink logistic regression on every column.
WordsTrain2 <- WordsTrain
WordsTrain2$Popular <- NewsTrain$Popular
WordsLog2 <- glm(Popular ~ . , data= WordsTrain2, family= binomial)
summary(WordsLog2 )
# NOTE(review): predict() has no `data` argument — `data=` is silently
# ignored here, so these calls return fitted values on the training data
# (which is what the subsequent tables use anyway). `newdata=` is the
# argument that would score a different data set.
predWordsLog = predict(WordsLog, data= WordsTrain, type = "response" )
table(WordsTrain$Popular, predWordsLog > 0.5)
predWordsLog
ROCRpredTrain = prediction(predWordsLog, WordsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain, "auc")@y.values)
predWordsLog2 = predict(WordsLog2, data= WordsTrain, type = "response" )
table(WordsTrain$Popular, predWordsLog2 > 0.5)
ROCRpredTrain2 = prediction(predWordsLog2, WordsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain2, "auc")@y.values)
# Test-set predictions (correctly via newdata) and submission file.
predWordsLog3 = predict(WordsLog, newdata= WordsTest, type = "response" )
predWordsLog3
MySubmission4 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predWordsLog3)
write.csv(MySubmission4, "LogRegressioncorpus.csv", row.names=FALSE)
# ---- Random forest and CART on the combined word features ----
# NOTE(review): this formula references Sub_* columns, but the abstract word
# columns were prefixed "Ab_" above (colnames(...) <- paste("Ab", ...)); the
# Sub_* names look stale from an earlier prefix choice — confirm these
# columns exist, otherwise this call errors. Also `Sub_play` appears twice.
WordsForest <- randomForest(as.factor(Popular) ~ music+ recap + war + day+ fashion + morn + new + read + today + word + WordCount + NewsDesk + Weekday + SubsectionName+ SectionName + Sub_play + Sub_american + Sub_latest + Sub_live + Sub_music + Sub_recent + Sub_say + Sub_school + Sub_system + Sub_women + Sub_write+ Sub_reader + Sub_play, data= WordsTrain, method= "class", ntree=1000, nodesize=15)
summary(WordsForest )
# NOTE(review): `data=` is not a predict() argument — out-of-bag training
# predictions are returned here (see note on the logistic models above).
predWordsForest = predict(WordsForest, data= WordsTrain, type = "prob" )
table(WordsTrain$Popular, predWordsForest[,2] > 0.5)
ROCRpredTrain = prediction(predWordsForest[,2], WordsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain, "auc")@y.values)
# Forest on all columns, then training AUC.
WordsForest2 <- randomForest(as.factor(Popular) ~ . , data= WordsTrain, method= "class", ntree= 1000, nodesize= 15)
summary(WordsForest2 )
predWordsForest2 = predict(WordsForest2, data= WordsTrain, type = "prob" )
table(WordsTrain$Popular, predWordsForest2[,2] > 0.5)
ROCRpredTrain2 = prediction(predWordsForest2[,2], WordsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain2, "auc")@y.values)
# Test-set predictions and submission file.
predWordsForest3 = predict(WordsForest, newdata= WordsTest, type = "prob" )
MySubmission5 = data.frame(UniqueID = NewsTest$UniqueID, Probability1= predWordsForest3[,2])
write.csv(MySubmission5, "RandomForestcorpus.csv", row.names=FALSE)
# Single CART tree on the same feature set (same stale Sub_* caveat).
WordsCART1 = rpart(Popular ~ music+ recap + war + day+ fashion + morn + new + read + today + word + WordCount + NewsDesk + Weekday + SectionName + Sub_play + Sub_american + Sub_latest + Sub_live + Sub_music + Sub_recent + Sub_say + Sub_school + Sub_system + Sub_women + Sub_write+ Sub_reader + Sub_play, data= WordsTrain, method= "class")
predWordsCART1 = predict(WordsCART1, data=WordsTrain, type = "prob" )
prp(WordsCART1)
table(WordsTrain$Popular, predWordsCART1[,2] > 0.5)
ROCRpredTrain = prediction(predWordsCART1[,2], WordsTrain$Popular)
auc = as.numeric(performance(ROCRpredTrain, "auc")@y.values)
# ---- Feature engineering: phrase / punctuation indicator columns ----
# Each grepl() call creates a logical column flagging whether the Headline or
# Abstract contains a phrase or character. This is an interactive transcript:
# several `table(...)` inspections and copies into the modeling frames appear
# BEFORE the column they reference is created (flagged below) — the session
# presumably ran these cells out of order.
str(NewsTrain$Headline)
#Feature engineering based on common phrases, punctuation marks, etc.
# NOTE(review): in "Q. and A." the dots are regex wildcards, so this also
# matches e.g. "Qx and Ax"; use "Q\\. and A\\." for literal periods.
NewsTrain$qna = grepl("Q. and A." , NewsTrain$Headline )
NewsTest$qna = grepl("Q. and A." , NewsTest$Headline )
NewsTrain$Headlineqmark = grepl("\\?" , NewsTrain$Headline )
NewsTest$Headlineqmark = grepl("\\?" , NewsTest$Headline )
table(NewsTrain$Headlineqmark , NewsTrain$Popular)
# NOTE(review): abdollar and headlinequote are tabulated here but only
# created further down — out-of-order transcript.
table(NewsTrain$abdollar , NewsTrain$Popular)
table(NewsTrain$headlinequote , NewsTrain$Popular)
NewsTrain$Abstractqmark = grepl("\\?" , NewsTrain$Abstract )
NewsTest$Abstractqmark = grepl("\\?" , NewsTest$Abstract )
NewsTrain$weirdmark= grepl("\\|" , NewsTrain$Headline )
NewsTrain$comma= grepl("\\," , NewsTrain$Headline )
NewsTrain$dollar= grepl("\\$" , NewsTrain$Headline )
NewsTest$dollar= grepl("\\$" , NewsTest$Headline )
NewsTrain$abdollar= grepl("\\$" , NewsTrain$Abstract )
NewsTest$abdollar= grepl("\\$" , NewsTest$Abstract )
HeadlineWordsTrain$abdollar <- NewsTrain$abdollar
HeadlineWordsTest$abdollar <- NewsTest$abdollar
NewsTrain$headlinequote = grepl("\\'" , NewsTrain$Headline )
NewsTrain$period = grepl("\\." , NewsTrain$Headline )
NewsTrain$colon = grepl("\\:" , NewsTrain$Headline )
NewsTest$colon = grepl("\\:" , NewsTest$Headline )
NewsTrain$Abcolon = grepl("\\:" , NewsTrain$Abstract )
NewsTest$Abcolon = grepl("\\:" , NewsTest$Abstract )
# NOTE(review): copied into WordsTrain BEFORE being created two lines below.
WordsTrain$nineteen <- NewsTrain$nineteen
WordsTrain$eighteen <- NewsTrain$eighteen
NewsTrain$nineteen = grepl("19" , NewsTrain$Headline )
NewsTrain$eighteen = grepl("18" , NewsTrain$Headline )
summary(NewsTrain)
NewsTest$nineteen = grepl("19" , NewsTest$Headline )
NewsTest$eighteen = grepl("18" , NewsTest$Headline )
summary(NewsTest)
WordsTest$nineteen <- NewsTest$nineteen
WordsTest$eighteen <- NewsTest$eighteen
WordsTest$eighteen
NewsTrain$fourteen = grepl("2014" , NewsTrain$Headline )
NewsTest$fourteen = grepl("2014" , NewsTest$Headline )
grepl("\\$" , NewsTrain$Headline )
NewsTrain$OFC = grepl("Open for Comments" , NewsTrain$Headline )
NewsTest$OFC = grepl("Open for Comments" , NewsTest$Headline )
HeadlineWordsTrain$OFC = NewsTrain$OFC
HeadlineWordsTest$OFC = NewsTest$OFC
NewsTrain$Obama = grepl("Obama" , NewsTrain$Headline )
table(NewsTrain$Obama , NewsTrain$Popular)
NewsTrain$review = grepl("review" , NewsTrain$Headline )
table(NewsTrain$review , NewsTrain$Popular)
# NOTE(review): `movie` searches "review" again — an exact duplicate of the
# `review` column; the pattern was presumably meant to be "movie".
NewsTrain$movie = grepl("review" , NewsTrain$Headline )
table(NewsTrain$movie , NewsTrain$Popular)
# NOTE(review): in the default (TRE) regex engine "\\N" just matches a
# literal "N", so "\\New York" == "New York"; the backslashes scattered over
# the patterns below ("\\Obama", "\\Ferguson", ...) are no-ops.
NewsTrain$NY = grepl("\\New York" , NewsTrain$Headline )
NewsTest$NY = grepl("\\New York" , NewsTest$Headline )
HeadlineWordsTrain$NY = NewsTrain$NY
HeadlineWordsTest$NY = NewsTest$NY
# NOTE(review): `comments` is immediately overwritten by the Abstract-based
# version on the next line, so the Headline-based flag is lost.
NewsTrain$comments = grepl("\\Comments" , NewsTrain$Headline )
NewsTrain$comments = grepl("comments" , NewsTrain$Abstract )
NewsTest$comments = grepl("comments" , NewsTest$Abstract )
NewsTrain$wordof = grepl("Word of" , NewsTrain$Headline )
NewsTrain$NYAbs = grepl("\\New York" , NewsTrain$Abstract )
NewsTrain$obama = grepl("\\Obama" , NewsTrain$Headline )
NewsTrain$abobama = grepl("\\Obama" , NewsTrain$Abstract)
NewsTrain$report = grepl("\\Report" , NewsTrain$Headline )
NewsTrain$today = grepl("Today in" , NewsTrain$Headline )
NewsTrain$ten = grepl("10" , NewsTrain$Headline )
NewsTrain$t = grepl("\\T's" , NewsTrain$Headline )
# NOTE(review): "A " matches anywhere in the headline, not only a leading
# "A " — presumably intended as a starts-with test; confirm.
NewsTrain$A = grepl("A " , NewsTrain$Headline )
NewsTrain$we = grepl("What We" , NewsTrain$Headline )
NewsTest$comments = grepl("\\Comments" , NewsTest$Headline )
NewsTest$wordof = grepl("Word of" , NewsTest$Headline )
NewsTest$NYAbs = grepl("\\New York" , NewsTest$Abstract )
NewsTest$obama = grepl("\\Obama" , NewsTest$Headline )
NewsTest$abobama = grepl("\\Obama" , NewsTest$Abstract)
NewsTest$report = grepl("\\Report" , NewsTest$Headline )
NewsTest$today = grepl("Today in" , NewsTest$Headline )
NewsTest$ten = grepl("10" , NewsTest$Headline )
NewsTest$t = grepl("\\T's" , NewsTest$Headline )
# NOTE(review): train uses grepl("comments", Abstract) for `comments` but
# test here uses grepl("\\Comments", Headline) — train/test features differ.
NewsTest$A = grepl("A " , NewsTest$Headline )
NewsTest$we = grepl("What We" , NewsTest$Headline )
NewsTrain$Verbatim = grepl("Verbatim" , NewsTrain$Headline )
NewsTest$Verbatim = grepl("Verbatim" , NewsTest$Headline )
HeadlineWordsTrain$Verbatim = NewsTrain$Verbatim
HeadlineWordsTest$Verbatim = NewsTest$Verbatim
# NOTE(review): `picture` tabulated before it is created on the next line.
table(NewsTrain$picture, NewsTrain$Popular)
NewsTrain$picture= grepl("\\Pictures of", NewsTrain$Headline)
NewsTest$picture= grepl("\\Pictures of", NewsTest$Headline)
HeadlineWordsTrain$picture= NewsTrain$picture
HeadlineWordsTest$picture= NewsTest$picture
NewsTrain$six= grepl("6 Q", NewsTrain$Headline)
NewsTest$six= grepl("6 Q", NewsTest$Headline)
HeadlineWordsTrain$six= NewsTrain$six
HeadlineWordsTest$six= NewsTest$six
# NOTE(review): `daily` copied into the modeling frames BEFORE it is created
# two lines below.
HeadlineWordsTrain$daily <- NewsTrain$daily
HeadlineWordsTest$daily <- NewsTest$daily
NewsTrain$daily= grepl("Daily", NewsTrain$Headline)
NewsTest$daily= grepl("Daily", NewsTest$Headline)
NewsTrain$DC= grepl("Daily Clip", NewsTrain$Headline)
NewsTest$DC= grepl("Daily Clip", NewsTest$Headline)
NewsTrain$test= grepl("Test Yourself", NewsTrain$Headline)
NewsTest$test= grepl("Test Yourself", NewsTest$Headline)
NewsTrain$DR= grepl("Daily Report", NewsTrain$Headline)
NewsTest$DR= grepl("Daily Report", NewsTest$Headline)
NewsTrain$Ask = grepl("Ask Well", NewsTrain$Headline)
NewsTest$Ask = grepl("Ask Well", NewsTest$Headline)
NewsTrain$nc = grepl("No Comment", NewsTrain$Headline)
NewsTest$nc = grepl("No Comment", NewsTest$Headline)
NewsTrain$China = grepl("China", NewsTrain$Headline)
NewsTest$China = grepl("China", NewsTest$Headline)
NewsTrain$Chinese = grepl("Chinese", NewsTrain$Headline)
NewsTest$Chinese = grepl("Chinese", NewsTest$Headline)
# NOTE(review): `ferg` copied before it is created two lines below.
WordsTrain$ferg <- NewsTrain$ferg
WordsTest$ferg <- NewsTest$ferg
NewsTrain$ferg = grepl("\\Ferguson", NewsTrain$Headline)
NewsTest$ferg = grepl("\\Ferguson", NewsTest$Headline)
NewsTrain$abferg = grepl("\\Ferguson", NewsTrain$Abstract)
NewsTest$abferg = grepl("\\Ferguson", NewsTest$Abstract)
WordsTrain$abferg <- NewsTrain$abferg
WordsTest$abferg <- NewsTest$abferg
NewsTrain$ebola = grepl("\\Ebola", NewsTrain$Headline)
NewsTest$ebola = grepl("\\Ebola", NewsTest$Headline)
# NOTE(review): `abebola` copied before it is created two lines below.
HeadlineWordsTrain$abebola <- NewsTrain$abebola
HeadlineWordsTest$abebola <- NewsTest$abebola
NewsTrain$abebola = grepl("\\Ebola", NewsTrain$Abstract)
NewsTest$abebola = grepl("\\Ebola", NewsTest$Abstract)
NewsTrain$abuber = grepl("\\Uber", NewsTrain$Abstract)
NewsTest$abuber = grepl("\\Uber", NewsTest$Abstract)
NewsTrain$abpuzzle = grepl("\\puzzle", NewsTrain$Abstract)
NewsTest$abpuzzle = grepl("\\puzzle", NewsTest$Abstract)
NewsTrain$abreader = grepl("reader", NewsTrain$Abstract)
NewsTest$abreader = grepl("reader", NewsTest$Abstract)
# NOTE(review): "readers" is a subset of the "reader" matches above.
NewsTrain$abreaders = grepl("readers", NewsTrain$Abstract)
NewsTest$abreaders = grepl("readers", NewsTest$Abstract)
NewsTrain$pres = grepl("president", NewsTrain$Abstract)
NewsTest$pres = grepl("president", NewsTest$Abstract)
NewsTrain$abTimes = grepl("Times", NewsTrain$Abstract)
NewsTest$abTimes = grepl("Times", NewsTest$Abstract)
NewsTrain$Times = grepl("Times", NewsTrain$Headline)
NewsTest$Times = grepl("Times", NewsTest$Headline)
summary(NewsTrain)
table(NewsTrain$abuber, NewsTrain$Popular)
HeadlineWordsTrain$abuber<- NewsTrain$abuber
HeadlineWordsTest$abuber<- NewsTest$abuber
NewsTrain$uber = grepl("Uber", NewsTrain$Headline)
NewsTest$uber = grepl("Uber", NewsTest$Headline)
NewsTrain$The = grepl("The", NewsTrain$Headline)
NewsTest$The = grepl("The", NewsTest$Headline)
HeadlineWordsTrain$uber<- NewsTrain$uber
HeadlineWordsTest$uber<- NewsTest$uber
NewsTrain$Facebook = grepl("Facebook", NewsTrain$Headline)
NewsTest$Facebook = grepl("Facebook", NewsTest$Headline)
NewsTrain$RR = grepl("Readers Respond", NewsTrain$Headline)
NewsTest$RR = grepl("Readers Respond", NewsTest$Headline)
NewsTrain$yt = grepl("Your Turn", NewsTrain$Headline)
NewsTest$yt = grepl("Your Turn", NewsTest$Headline)
NewsTrain$fashion = grepl("Fashion", NewsTrain$Headline)
NewsTest$fashion = grepl("Fashion", NewsTest$Headline)
NewsTrain$abfashion = grepl("fashion", NewsTrain$Abstract)
NewsTest$abfashion = grepl("fashion", NewsTest$Abstract)
NewsTrain$abRR = grepl("Readers", NewsTrain$Abstract)
NewsTest$abRR = grepl("Readers", NewsTest$Abstract)
NewsTrain$senator = grepl("Senator", NewsTrain$Abstract)
NewsTest$senator = grepl("Senator", NewsTest$Abstract)
NewsTrain$eu = grepl("Euro", NewsTrain$Headline)
NewsTest$eu = grepl("Euro", NewsTest$Headline)
NewsTrain$music = grepl("Music", NewsTrain$Headline)
NewsTest$music = grepl("Music", NewsTest$Headline)
NewsTrain$abmusic = grepl("music", NewsTrain$Abstract)
NewsTest$abmusic = grepl("music", NewsTest$Abstract)
NewsTrain$War = grepl("War", NewsTrain$Headline)
NewsTest$War = grepl("War", NewsTest$Headline)
NewsTrain$abwar = grepl("war", NewsTrain$Abstract)
NewsTest$abwar = grepl("war", NewsTest$Abstract)
summary(NewsTrain)
table(NewsTrain$ferg, NewsTrain$Popular)
table(NewsTrain$ebola, NewsTrain$Popular)
HeadlineWordsTrain$nc = NewsTrain$nc
HeadlineWordsTest$nc =NewsTest$nc
NewsTrain$recap = grepl("\\Recap", NewsTrain$Headline)
NewsTrain$facts = grepl("Facts", NewsTrain$Headline)
NewsTrain$morning = grepl("Morning Agenda", NewsTrain$Headline)
NewsTrain$friday = grepl("Friday Night", NewsTrain$Headline)
NewsTrain$apple = grepl("\\Apple", NewsTrain$Headline)
NewsTrain$quandary = grepl("Weekly Quandary", NewsTrain$Headline)
NewsTrain$why = grepl("Why", NewsTrain$Headline)
NewsTrain$excl = grepl("\\!", NewsTrain$Headline)
# Composite flag: any of the strongly-positive headline phrases.
NewsTrain$posvar = NewsTrain$quandary | NewsTrain$facts | NewsTrain$nc | NewsTrain$Ask
NewsTest$recap = grepl("\\Recap", NewsTest$Headline)
NewsTest$facts = grepl("Facts", NewsTest$Headline)
NewsTest$morning = grepl("Morning Agenda", NewsTest$Headline)
NewsTest$friday = grepl("Friday Night", NewsTest$Headline)
NewsTest$apple = grepl("\\Apple", NewsTest$Headline)
NewsTest$quandary = grepl("Weekly Quandary", NewsTest$Headline)
NewsTest$why = grepl("Why", NewsTest$Headline)
NewsTest$excl = grepl("\\!", NewsTest$Headline)
table(NewsTrain$recap, NewsTrain$Popular)
table(NewsTrain$excl, NewsTrain$Popular)
table(grepl("Open for Comments" , NewsTest$Headline ))
# "startsWith" is a base-R function (R >= 3.3), not a CRAN package, so the
# original install.packages("startsWith")/library(startsWith) calls failed.
# The call at the end also referenced an undefined object `A` and passed
# `trim`/`ignore.case`, which are not arguments of base::startsWith(); it is
# replaced with the documented two-argument form.
# The five lines below originally began with a stray "=" (a syntax error that
# broke sourcing this file); they are restored as plain inspection
# expressions, matching their neighbours.
NewsTrain$recap
NewsTrain$facts
NewsTrain$morning
NewsTrain$friday
NewsTrain$apple
NewsTrain$quandary
NewsTrain$why
NewsTrain$excl
NewsTest$recap
NewsTest$facts
NewsTest$morning
NewsTest$friday
NewsTest$apple
NewsTest$quandary
NewsTest$why
NewsTest$excl
# Headlines that start with a literal "A" (base R, no extra package needed).
startsWith(NewsTrain$Headline, "A")
# ---- Final inspections and word-count features ----
summary(NewsTrain)
summary(NewsTest)
# NOTE(review): Popular is compared to "Yes" here but to 0/1 everywhere else
# in this script — confirm which coding HeadlineWordsTrain$Popular uses, or
# this subset is empty.
pop <- subset(HeadlineWordsTrain, Popular == "Yes")
summary(pop)
# NOTE(review): AbstractWordCount is only created two lines below —
# out-of-order transcript.
hist(pop$AbstractWordCount)
hist(HeadlineWordsTrain$AbstractWC)
table(NewsTrain$NY, NewsTrain$Popular)
# Approximate word counts: number of non-word-character runs per text.
HeadlineWordsTrain$AbstractWordCount = sapply(gregexpr("\\W+", NewsTrain$Abstract), length)
HeadlineWordsTrain$HeadlineWordCount = sapply(gregexpr("\\W+", NewsTrain$Headline), length)
hist(log(1 + NewsTrain$HeadlineWC))
hist(HeadlineWordsTrain$HeadlineWordCount)
|
sampleUnivSN <- function(mu, sigma, lambda, n = 1) {
  # Draw `n` samples from a univariate skew-normal SN(mu, sigma, lambda)
  # via the stochastic representation
  #   Y = mu + delta * |U| + sqrt(1 - delta^2) * E,  U, E ~ N(0, sigma),
  # where delta = lambda / sqrt(1 + lambda^2) encodes the skewness.
  skew_weight <- lambda / sqrt(1 + lambda^2)
  # Half-normal part first, then the symmetric part: the order of the two
  # rnorm() calls is part of the function's reproducible RNG behaviour.
  half_normal <- abs(stats::rnorm(n = n, mean = 0, sd = sigma))
  symmetric <- stats::rnorm(n = n, mean = 0, sd = sigma)
  mu + skew_weight * half_normal + sqrt(1 - skew_weight^2) * symmetric
}
sampleUnivSt <- function(mu, sigma, nu, lambda, n = 1) {
  # Draw `n` samples from a univariate skew-t distribution with location
  # `mu`, scale `sigma`, degrees of freedom `nu` and skewness `lambda`,
  # using the normal scale-mixture representation
  #   Y = mu + sigma * E / sqrt(W),
  # with E ~ SN(0, 1, lambda) and W ~ Gamma(shape = nu/2, scale = 2/nu).
  #
  # Fix: `n` was previously ignored — both draws were hard-coded to n = 1,
  # so requests for more than one sample silently returned a single value.
  # The default n = 1 keeps the original behaviour for existing callers.
  e <- sampleUnivSN(0, 1, lambda, n = n)
  # A gamma variable
  w <- stats::rgamma(n = n, shape = nu / 2, scale = 2 / nu)
  # A skew-t variable
  y <- mu + sigma * e / sqrt(w)
  return(y)
}
#' Draw a sample from a univariate skew-t mixture of experts (StMoE).
#'
#' @param alphak The parameters of the gating network. `alphak` is a matrix of
#'   size \emph{(q + 1, K - 1)}, with \emph{K - 1}, the number of regressors
#'   (experts) and \emph{q} the order of the logistic regression
#' @param betak Matrix of size \emph{(p + 1, K)} representing the regression
#'   coefficients of the experts network.
#' @param sigmak Vector of length \emph{K} giving the standard deviations of
#'   the experts network.
#' @param lambdak Vector of length \emph{K} giving the skewness parameter of
#'   each expert.
#' @param nuk Vector of length \emph{K} giving the degrees of freedom of the
#'   experts network t densities.
#' @param x A vector of length \emph{n} representing the inputs (predictors).
#'
#' @return A list with the output variable `y` and statistics.
#' \itemize{
#'   \item `y` Vector of length \emph{n} giving the output variable.
#'   \item `zi` A vector of size \emph{n} giving the hidden label of the
#'     expert component generating the i-th observation. Its elements are
#'     \eqn{zi[i] = k}, if the i-th observation has been generated by the
#'     k-th expert.
#'   \item `z` A matrix of size \emph{(n, K)} giving the values of the binary
#'     latent component indicators \eqn{Z_{ik}}{Zik} such that
#'     \eqn{Z_{ik} = 1}{Zik = 1} iff \eqn{Z_{i} = k}{Zi = k}.
#'   \item `stats` A list whose elements are:
#'   \itemize{
#'     \item `Ey_k` Matrix of size \emph{(n, K)} giving the conditional
#'       expectation of Yi the output variable given the value of the
#'       hidden label of the expert component generating the ith observation
#'       \emph{zi = k}, and the value of predictor \emph{X = xi}.
#'     \item `Ey` Vector of length \emph{n} giving the conditional expectation
#'       of Yi given the value of predictor \emph{X = xi}.
#'     \item `Vary_k` Vector of length \emph{K} representing the conditional
#'       variance of Yi given \emph{zi = k}, and \emph{X = xi}.
#'     \item `Vary` Vector of length \emph{n} giving the conditional expectation
#'       of Yi given \emph{X = xi}.
#'   }
#' }
#' @export
#'
#' @examples
#' n <- 500 # Size of the sample
#' alphak <- matrix(c(0, 8), ncol = 1) # Parameters of the gating network
#' betak <- matrix(c(0, -2.5, 0, 2.5), ncol = 2) # Regression coefficients of the experts
#' sigmak <- c(0.5, 0.5) # Standard deviations of the experts
#' lambdak <- c(3, 5) # Skewness parameters of the experts
#' nuk <- c(5, 7) # Degrees of freedom of the experts network t densities
#' x <- seq.int(from = -1, to = 1, length.out = n) # Inputs (predictors)
#'
#' # Generate sample of size n
#' sample <- sampleUnivStMoE(alphak = alphak, betak = betak, sigmak = sigmak,
#' lambdak = lambdak, nuk = nuk, x = x)
#'
#' # Plot points and estimated means
#' plot(x, sample$y, pch = 4)
#' lines(x, sample$stats$Ey_k[, 1], col = "blue", lty = "dotted", lwd = 1.5)
#' lines(x, sample$stats$Ey_k[, 2], col = "blue", lty = "dotted", lwd = 1.5)
#' lines(x, sample$stats$Ey, col = "red", lwd = 1.5)
sampleUnivStMoE <- function(alphak, betak, sigmak, lambdak, nuk, x) {
n <- length(x)
# Polynomial order p and logistic order q are implied by the coefficient
# matrices; K is the number of experts.
p <- nrow(betak) - 1
q <- nrow(alphak) - 1
K = ncol(betak)
# Build the regression design matrices
# NOTE(review): designmatrix() is called twice with identical arguments;
# the result could be computed once and both components reused.
XBeta <- designmatrix(x, p, q)$XBeta # For the polynomial regression
XAlpha <- designmatrix(x, p, q)$Xw # For the logistic regression
y <- rep.int(x = 0, times = n)
z <- zeros(n, K)
zi <- rep.int(x = 0, times = n)
# Per-expert skewness transform delta = lambda / sqrt(1 + lambda^2).
deltak <- lambdak / sqrt(1 + lambdak ^ 2)
# Calculate the mixing proportions piik:
piik <- multinomialLogit(alphak, XAlpha, zeros(n, K), ones(n, 1))$piik
# For each observation: draw its expert label, then draw y from that
# expert's skew-t distribution.
for (i in 1:n) {
zik <- stats::rmultinom(n = 1, size = 1, piik[i,])
mu <- as.numeric(XBeta[i,] %*% betak[, zik == 1])
sigma <- sigmak[zik == 1]
lambda <- lambdak[zik == 1]
nu <- nuk[zik == 1]
y[i] <- sampleUnivSt(mu = mu, sigma = sigma, nu = nu, lambda = lambda)
z[i, ] <- t(zik)
zi[i] <- which.max(zik)
}
# Statistics (means, variances)
# Moment factor of the t scale mixture; finite only for nuk > 1 — callers
# should supply nuk > 2 so the variances below exist as well.
Xi_nuk = sqrt(nuk / pi) * (gamma(nuk / 2 - 1 / 2)) / (gamma(nuk / 2))
# E[yi|xi,zi=k]
Ey_k <- XBeta %*% betak + ones(n, 1) %*% (sigmak * deltak * Xi_nuk)
# E[yi|xi]
Ey <- rowSums(piik * Ey_k)
# Var[yi|xi,zi=k]
Vary_k <- (nuk / (nuk - 2) - (deltak ^ 2) * (Xi_nuk ^ 2)) * (sigmak ^ 2)
# Var[yi|xi]
Vary <- rowSums(piik * (Ey_k ^ 2 + ones(n, 1) %*% Vary_k)) - Ey ^ 2
stats <- list()
stats$Ey_k <- Ey_k
stats$Ey <- Ey
stats$Vary_k <- Vary_k
stats$Vary <- Vary
return(list(y = y, zi = zi, z = z, stats = stats))
}
| /R/sampleUnivStMoE.R | no_license | fchamroukhi/MEteorits | R | false | false | 5,187 | r | sampleUnivSN <- function(mu, sigma, lambda, n = 1) {
delta = lambda / sqrt(1 + lambda ^ 2)
u <- stats::rnorm(n = n, mean = 0, sd = sigma)
e <- stats::rnorm(n = n, mean = 0, sd = sigma)
y <- mu + delta * abs(u) + sqrt(1 - delta ^ 2) * e
return(y)
}
sampleUnivSt <- function(mu, sigma, nu, lambda, n = 1) {
  # Draw `n` samples from a univariate skew-t distribution with location
  # `mu`, scale `sigma`, degrees of freedom `nu` and skewness `lambda`,
  # using the normal scale-mixture representation
  #   Y = mu + sigma * E / sqrt(W),
  # with E ~ SN(0, 1, lambda) and W ~ Gamma(shape = nu/2, scale = 2/nu).
  #
  # Fix: `n` was previously ignored — both draws were hard-coded to n = 1,
  # so requests for more than one sample silently returned a single value.
  # The default n = 1 keeps the original behaviour for existing callers.
  e <- sampleUnivSN(0, 1, lambda, n = n)
  # A gamma variable
  w <- stats::rgamma(n = n, shape = nu / 2, scale = 2 / nu)
  # A skew-t variable
  y <- mu + sigma * e / sqrt(w)
  return(y)
}
#' Draw a sample from a univariate skew-t mixture of experts (StMoE).
#'
#' @param alphak The parameters of the gating network. `alphak` is a matrix of
#'   size \emph{(q + 1, K - 1)}, with \emph{K - 1}, the number of regressors
#'   (experts) and \emph{q} the order of the logistic regression
#' @param betak Matrix of size \emph{(p + 1, K)} representing the regression
#'   coefficients of the experts network.
#' @param sigmak Vector of length \emph{K} giving the standard deviations of
#'   the experts network.
#' @param lambdak Vector of length \emph{K} giving the skewness parameter of
#'   each expert.
#' @param nuk Vector of length \emph{K} giving the degrees of freedom of the
#'   experts network t densities.
#' @param x A vector of length \emph{n} representing the inputs (predictors).
#'
#' @return A list with the output variable `y` and statistics.
#' \itemize{
#'   \item `y` Vector of length \emph{n} giving the output variable.
#'   \item `zi` A vector of size \emph{n} giving the hidden label of the
#'     expert component generating the i-th observation. Its elements are
#'     \eqn{zi[i] = k}, if the i-th observation has been generated by the
#'     k-th expert.
#'   \item `z` A matrix of size \emph{(n, K)} giving the values of the binary
#'     latent component indicators \eqn{Z_{ik}}{Zik} such that
#'     \eqn{Z_{ik} = 1}{Zik = 1} iff \eqn{Z_{i} = k}{Zi = k}.
#'   \item `stats` A list whose elements are:
#'   \itemize{
#'     \item `Ey_k` Matrix of size \emph{(n, K)} giving the conditional
#'       expectation of Yi the output variable given the value of the
#'       hidden label of the expert component generating the ith observation
#'       \emph{zi = k}, and the value of predictor \emph{X = xi}.
#'     \item `Ey` Vector of length \emph{n} giving the conditional expectation
#'       of Yi given the value of predictor \emph{X = xi}.
#'     \item `Vary_k` Vector of length \emph{K} representing the conditional
#'       variance of Yi given \emph{zi = k}, and \emph{X = xi}.
#'     \item `Vary` Vector of length \emph{n} giving the conditional expectation
#'       of Yi given \emph{X = xi}.
#'   }
#' }
#' @export
#'
#' @examples
#' n <- 500 # Size of the sample
#' alphak <- matrix(c(0, 8), ncol = 1) # Parameters of the gating network
#' betak <- matrix(c(0, -2.5, 0, 2.5), ncol = 2) # Regression coefficients of the experts
#' sigmak <- c(0.5, 0.5) # Standard deviations of the experts
#' lambdak <- c(3, 5) # Skewness parameters of the experts
#' nuk <- c(5, 7) # Degrees of freedom of the experts network t densities
#' x <- seq.int(from = -1, to = 1, length.out = n) # Inputs (predictors)
#'
#' # Generate sample of size n
#' sample <- sampleUnivStMoE(alphak = alphak, betak = betak, sigmak = sigmak,
#' lambdak = lambdak, nuk = nuk, x = x)
#'
#' # Plot points and estimated means
#' plot(x, sample$y, pch = 4)
#' lines(x, sample$stats$Ey_k[, 1], col = "blue", lty = "dotted", lwd = 1.5)
#' lines(x, sample$stats$Ey_k[, 2], col = "blue", lty = "dotted", lwd = 1.5)
#' lines(x, sample$stats$Ey, col = "red", lwd = 1.5)
sampleUnivStMoE <- function(alphak, betak, sigmak, lambdak, nuk, x) {
n <- length(x)
# Polynomial order p and logistic order q are implied by the coefficient
# matrices; K is the number of experts.
p <- nrow(betak) - 1
q <- nrow(alphak) - 1
K = ncol(betak)
# Build the regression design matrices
# NOTE(review): designmatrix() is called twice with identical arguments;
# the result could be computed once and both components reused.
XBeta <- designmatrix(x, p, q)$XBeta # For the polynomial regression
XAlpha <- designmatrix(x, p, q)$Xw # For the logistic regression
y <- rep.int(x = 0, times = n)
z <- zeros(n, K)
zi <- rep.int(x = 0, times = n)
# Per-expert skewness transform delta = lambda / sqrt(1 + lambda^2).
deltak <- lambdak / sqrt(1 + lambdak ^ 2)
# Calculate the mixing proportions piik:
piik <- multinomialLogit(alphak, XAlpha, zeros(n, K), ones(n, 1))$piik
# For each observation: draw its expert label, then draw y from that
# expert's skew-t distribution.
for (i in 1:n) {
zik <- stats::rmultinom(n = 1, size = 1, piik[i,])
mu <- as.numeric(XBeta[i,] %*% betak[, zik == 1])
sigma <- sigmak[zik == 1]
lambda <- lambdak[zik == 1]
nu <- nuk[zik == 1]
y[i] <- sampleUnivSt(mu = mu, sigma = sigma, nu = nu, lambda = lambda)
z[i, ] <- t(zik)
zi[i] <- which.max(zik)
}
# Statistics (means, variances)
# Moment factor of the t scale mixture; finite only for nuk > 1 — callers
# should supply nuk > 2 so the variances below exist as well.
Xi_nuk = sqrt(nuk / pi) * (gamma(nuk / 2 - 1 / 2)) / (gamma(nuk / 2))
# E[yi|xi,zi=k]
Ey_k <- XBeta %*% betak + ones(n, 1) %*% (sigmak * deltak * Xi_nuk)
# E[yi|xi]
Ey <- rowSums(piik * Ey_k)
# Var[yi|xi,zi=k]
Vary_k <- (nuk / (nuk - 2) - (deltak ^ 2) * (Xi_nuk ^ 2)) * (sigmak ^ 2)
# Var[yi|xi]
Vary <- rowSums(piik * (Ey_k ^ 2 + ones(n, 1) %*% Vary_k)) - Ey ^ 2
stats <- list()
stats$Ey_k <- Ey_k
stats$Ey <- Ey
stats$Vary_k <- Vary_k
stats$Vary <- Vary
return(list(y = y, zi = zi, z = z, stats = stats))
}
|
library("rjson")
readResStat <- function(root, type = "DataNode", subtype = "net") {
  # Read the per-node resource statistics files for one daemon `type`
  # ("DataNode", "NodeManager", "YarnChild") and one resource `subtype`
  # ("net" or "disk") from directory `root`.
  #
  # Files are matched by the pattern "node.*<type>.*<subtype>" and read as
  # space-separated tables; each becomes one matrix in `$data` (same order
  # as the sorted `$files`). Returns NULL when no matching file exists.
  #
  # Fix: the original guard tested `is.null()` on the result of
  # `list.files()`, which never returns NULL (it returns character(0)), and
  # its body was a bare `null` (an undefined object). On an empty directory
  # the subsequent `1:length(...)` loop then failed with a subscript error.
  resstat <- list()
  class(resstat) <- "resstat"
  resstat$name <- paste(type, subtype, sep = "_")
  resstat$files <- sort(list.files(root, pattern = paste("node.*", type, ".*", subtype, sep = "")))
  if (length(resstat$files) == 0) {
    return(NULL)
  }
  resstat$data <- list()
  for (i in seq_along(resstat$files)) {
    resstat$data[[i]] <- as.matrix(read.table(file.path(root, resstat$files[[i]]),
                                              header = FALSE, sep = " "))
  }
  resstat
}
readResStatsOfRun <- function(runname, root) {
  # Collect every resource statistic recorded for one benchmark run: for
  # each daemon type (DataNode, NodeManager, YarnChild) the network and disk
  # counters are loaded via readResStat() from directory `root`.
  resstats <- list()
  class(resstats) <- "resstats"
  resstats$runname <- paste("resstats_", runname, sep = "")
  # Keys are the lower-case daemon names used as $data element prefixes.
  daemons <- c(datanode = "DataNode", nodemanager = "NodeManager", yarnchild = "YarnChild")
  for (key in names(daemons)) {
    for (resource in c("net", "disk")) {
      resstats$data[[paste(key, resource, sep = "_")]] <- readResStat(root, daemons[[key]], resource)
    }
  }
  resstats
}
slice <- function(l, indices) {
  # Return the elements of list `l` at positions `indices`, as a new
  # unnamed list (matching the original `[[`-based copy, which dropped
  # names).
  #
  # Fix: the original `for (i in 1:length(indices))` loop broke on empty
  # `indices` (1:0 iterates over c(1, 0), indexing out of bounds); iterating
  # with lapply() over the indices themselves handles the empty case and
  # returns list().
  unname(lapply(indices, function(i) l[[i]]))
}
# S3 plot method for a single "resstat" object: overlays, one line per node,
# the cumulative network-in (netin=TRUE, column 2) or network-out (column 3)
# counter after re-basing via transform.resstat(). `nodes` selects which
# entries of resstat$data/resstat$files to draw.
# NOTE(review): transform.resstat() already re-bases each series to its
# first sample and converts bytes to MB, so the extra
# `- transformedData[[i]][1,index]` below subtracts 0 and the "(bytes)"
# axis labels presumably under-state the units (values are MB) — confirm.
plot.resstat<-function(resstat, netin=TRUE, nodes=1:length(resstat$data))
{
if (netin)
{
index<-2
ylab<-"net in (bytes)"
}
else
{
index<-3
ylab<-"net out (bytes)"
}
# Axis ranges, seeded from the first node's first sample.
ymin<-0
ymax<-0
xmin<-resstat$data[[1]][1,1]
xmax<-resstat$data[[1]][1,1]
# Restrict to the requested nodes, then re-base/convert each series.
slicedData<-slice(resstat$data, nodes)
files<-slice(resstat$files, nodes)
transformedData<-list()
for(i in 1:length(slicedData))
{
transformedData[[i]]<-transform.resstat(slicedData[[i]])
}
# First pass: grow the common x/y ranges over all selected series.
for(i in 1:length(transformedData))
{
ymin<-min(ymin,min(transformedData[[i]][,index]-transformedData[[i]][1,index]))
ymax<-max(ymax,max(transformedData[[i]][,index]-transformedData[[i]][1,index]))
xmin<-min(xmin,min(transformedData[[i]][,1]))
xmax<-max(xmax,max(transformedData[[i]][,1]))
}
# Second pass: first series establishes the plot, the rest are overlaid
# with col=i to distinguish nodes.
for(i in 1:length(transformedData))
{
if ( i==1)
plot(transformedData[[i]][,1], transformedData[[i]][,index]-transformedData[[i]][1,index], type="l", xlim=c(xmin,xmax),ylim=c(ymin,ymax), xlab=files[i], ylab=ylab)
else
lines(transformedData[[i]][,1], transformedData[[i]][,index]-transformedData[[i]][1,index], col=i)
}
}
# S3 plot method for a "resstats" run object: for one node (`nodename`),
# overlays every statistic kind (datanode/nodemanager/yarnchild x net/disk)
# that has a file for that node. `input=TRUE` plots column 2 (in), otherwise
# column 3 (out). Node names are recovered from the file names, which are
# assumed to be "<node>_..." (split on "_") — TODO confirm that layout.
plot.resstats<-function(resstats, nodename, input=TRUE)
{
if (input)
pos<-2
else
pos<-3
# First pass: accumulate the x/y ranges and legend labels over all
# statistic kinds that cover this node.
ry<-vector()
rx<-vector()
leg<-vector()
for(i in 1:length(resstats$data))
{
nodenames<-lapply(resstats$data[[i]]$files,FUN=function(x){strsplit(x,"_")[[1]][1]})
if ( is.na(match(nodename,nodenames)))
next
leg<-c(leg,resstats$data[[i]]$name)
#print(resstats$data[[i]]$name)
#print(match(nodename,nodenames))
#print(resstats$data[[i]]$data[match(nodename,nodenames)][[1]])
transformed<-transform.resstat(resstats$data[[i]]$data[match(nodename,nodenames)][[1]])
ry<-cbind(ry,range(transformed[,pos]))
rx<-cbind(rx,range(transformed[,1]))
}
# Set up an empty plot over the combined ranges, then draw the legend.
# NOTE(review): legend() takes the keyword in `x` only — passing y="top"
# alongside x="left" is not a valid position pair; and col=c(1:6) is
# hard-coded to six entries regardless of how many series matched.
plot.new()
plot.window(xlim=range(rx),ylim=range(ry))
axis(1)
axis(2)
if (input)
title(main="resources statistics in",xlab=nodename, ylab="MB")
else
title(main="resources statistics out",xlab=nodename, ylab="MB")
legend(x="left", y="top", legend=leg, col=c(1,2,3,4,5,6),lty=1)
# Second pass: overlay one line per statistic kind (col=i matches the
# legend only if every kind matched this node).
for(i in 1:length(resstats$data))
{
nodenames<-lapply(resstats$data[[i]]$files,FUN=function(x){strsplit(x,"_")[[1]][1]})
if ( is.na(match(nodename,nodenames)))
next
transformed<-transform.resstat(resstats$data[[i]]$data[match(nodename,nodenames)][[1]])
lines(transformed[,1], transformed[,pos], col=i)
}
}
transform.resstat <- function(data) {
  # Re-base a raw counter matrix for plotting: column 1 (timestamps) is kept
  # as-is; columns 2 and 3 (cumulative byte counters) are offset by their
  # first sample and converted from bytes to MB. The result has the same
  # dimensions as `data`; any further columns are zero-filled.
  bytes_per_mb <- 1024 * 1024
  out <- matrix(0, nrow = nrow(data), ncol = ncol(data))
  out[, 1] <- data[, 1]
  out[, 2] <- (data[, 2] - data[1, 2]) / bytes_per_mb
  out[, 3] <- (data[, 3] - data[1, 3]) / bytes_per_mb
  out
}
| /RProjects/ResStat.R | permissive | abdul-git/yarn-monitoring | R | false | false | 4,405 | r | library("rjson")
readResStat <- function(root, type = "DataNode", subtype = "net") {
  # Read the per-node resource statistics files for one daemon `type`
  # ("DataNode", "NodeManager", "YarnChild") and one resource `subtype`
  # ("net" or "disk") from directory `root`.
  #
  # Files are matched by the pattern "node.*<type>.*<subtype>" and read as
  # space-separated tables; each becomes one matrix in `$data` (same order
  # as the sorted `$files`). Returns NULL when no matching file exists.
  #
  # Fix: the original guard tested `is.null()` on the result of
  # `list.files()`, which never returns NULL (it returns character(0)), and
  # its body was a bare `null` (an undefined object). On an empty directory
  # the subsequent `1:length(...)` loop then failed with a subscript error.
  resstat <- list()
  class(resstat) <- "resstat"
  resstat$name <- paste(type, subtype, sep = "_")
  resstat$files <- sort(list.files(root, pattern = paste("node.*", type, ".*", subtype, sep = "")))
  if (length(resstat$files) == 0) {
    return(NULL)
  }
  resstat$data <- list()
  for (i in seq_along(resstat$files)) {
    resstat$data[[i]] <- as.matrix(read.table(file.path(root, resstat$files[[i]]),
                                              header = FALSE, sep = " "))
  }
  resstat
}
readResStatsOfRun <- function(runname, root) {
  # Collect every resource statistic recorded for one benchmark run: for
  # each daemon type (DataNode, NodeManager, YarnChild) the network and disk
  # counters are loaded via readResStat() from directory `root`.
  resstats <- list()
  class(resstats) <- "resstats"
  resstats$runname <- paste("resstats_", runname, sep = "")
  # Keys are the lower-case daemon names used as $data element prefixes.
  daemons <- c(datanode = "DataNode", nodemanager = "NodeManager", yarnchild = "YarnChild")
  for (key in names(daemons)) {
    for (resource in c("net", "disk")) {
      resstats$data[[paste(key, resource, sep = "_")]] <- readResStat(root, daemons[[key]], resource)
    }
  }
  resstats
}
slice <- function(l, indices) {
  # Return the elements of list `l` at positions `indices`, as a new
  # unnamed list (matching the original `[[`-based copy, which dropped
  # names).
  #
  # Fix: the original `for (i in 1:length(indices))` loop broke on empty
  # `indices` (1:0 iterates over c(1, 0), indexing out of bounds); iterating
  # with lapply() over the indices themselves handles the empty case and
  # returns list().
  unname(lapply(indices, function(i) l[[i]]))
}
# S3 plot method for a single "resstat" object: overlays, one line per node,
# the cumulative network-in (netin=TRUE, column 2) or network-out (column 3)
# counter after re-basing via transform.resstat(). `nodes` selects which
# entries of resstat$data/resstat$files to draw.
# NOTE(review): transform.resstat() already re-bases each series to its
# first sample and converts bytes to MB, so the extra
# `- transformedData[[i]][1,index]` below subtracts 0 and the "(bytes)"
# axis labels presumably under-state the units (values are MB) — confirm.
plot.resstat<-function(resstat, netin=TRUE, nodes=1:length(resstat$data))
{
if (netin)
{
index<-2
ylab<-"net in (bytes)"
}
else
{
index<-3
ylab<-"net out (bytes)"
}
# Axis ranges, seeded from the first node's first sample.
ymin<-0
ymax<-0
xmin<-resstat$data[[1]][1,1]
xmax<-resstat$data[[1]][1,1]
# Restrict to the requested nodes, then re-base/convert each series.
slicedData<-slice(resstat$data, nodes)
files<-slice(resstat$files, nodes)
transformedData<-list()
for(i in 1:length(slicedData))
{
transformedData[[i]]<-transform.resstat(slicedData[[i]])
}
# First pass: grow the common x/y ranges over all selected series.
for(i in 1:length(transformedData))
{
ymin<-min(ymin,min(transformedData[[i]][,index]-transformedData[[i]][1,index]))
ymax<-max(ymax,max(transformedData[[i]][,index]-transformedData[[i]][1,index]))
xmin<-min(xmin,min(transformedData[[i]][,1]))
xmax<-max(xmax,max(transformedData[[i]][,1]))
}
# Second pass: first series establishes the plot, the rest are overlaid
# with col=i to distinguish nodes.
for(i in 1:length(transformedData))
{
if ( i==1)
plot(transformedData[[i]][,1], transformedData[[i]][,index]-transformedData[[i]][1,index], type="l", xlim=c(xmin,xmax),ylim=c(ymin,ymax), xlab=files[i], ylab=ylab)
else
lines(transformedData[[i]][,1], transformedData[[i]][,index]-transformedData[[i]][1,index], col=i)
}
}
# S3 plot method for a "resstats" run object: for one node (`nodename`),
# overlays every statistic kind (datanode/nodemanager/yarnchild x net/disk)
# that has a file for that node. `input=TRUE` plots column 2 (in), otherwise
# column 3 (out). Node names are recovered from the file names, which are
# assumed to be "<node>_..." (split on "_") — TODO confirm that layout.
plot.resstats<-function(resstats, nodename, input=TRUE)
{
if (input)
pos<-2
else
pos<-3
# First pass: accumulate the x/y ranges and legend labels over all
# statistic kinds that cover this node.
ry<-vector()
rx<-vector()
leg<-vector()
for(i in 1:length(resstats$data))
{
nodenames<-lapply(resstats$data[[i]]$files,FUN=function(x){strsplit(x,"_")[[1]][1]})
if ( is.na(match(nodename,nodenames)))
next
leg<-c(leg,resstats$data[[i]]$name)
#print(resstats$data[[i]]$name)
#print(match(nodename,nodenames))
#print(resstats$data[[i]]$data[match(nodename,nodenames)][[1]])
transformed<-transform.resstat(resstats$data[[i]]$data[match(nodename,nodenames)][[1]])
ry<-cbind(ry,range(transformed[,pos]))
rx<-cbind(rx,range(transformed[,1]))
}
# Set up an empty plot over the combined ranges, then draw the legend.
# NOTE(review): legend() takes the keyword in `x` only — passing y="top"
# alongside x="left" is not a valid position pair; and col=c(1:6) is
# hard-coded to six entries regardless of how many series matched.
plot.new()
plot.window(xlim=range(rx),ylim=range(ry))
axis(1)
axis(2)
if (input)
title(main="resources statistics in",xlab=nodename, ylab="MB")
else
title(main="resources statistics out",xlab=nodename, ylab="MB")
legend(x="left", y="top", legend=leg, col=c(1,2,3,4,5,6),lty=1)
# Second pass: overlay one line per statistic kind (col=i matches the
# legend only if every kind matched this node).
for(i in 1:length(resstats$data))
{
nodenames<-lapply(resstats$data[[i]]$files,FUN=function(x){strsplit(x,"_")[[1]][1]})
if ( is.na(match(nodename,nodenames)))
next
transformed<-transform.resstat(resstats$data[[i]]$data[match(nodename,nodenames)][[1]])
lines(transformed[,1], transformed[,pos], col=i)
}
}
# Re-base raw cumulative resource counters against their first sample
# and convert them from bytes to megabytes.
#
# Column 1 (timestamps) is copied through unchanged; columns 2 and 3
# are offset by their first-row value and divided by 1024^2, so each
# output series starts at 0 MB.
#
# Returns a plain numeric matrix with the same dimensions as `data`.
transform.resstat<-function(data)
{
  n <- nrow(data)
  bytes.per.mb <- 1024 * 1024
  result <- matrix(0, nrow = n, ncol = ncol(data))
  result[, 1] <- data[, 1]
  result[, 2] <- (data[, 2] - data[1, 2]) / bytes.per.mb
  result[, 3] <- (data[, 3] - data[1, 3]) / bytes.per.mb
  result
}
|
%
% Copyright 2007-2021 by the individuals mentioned in the source code history
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
\name{MxBounds-class}
\alias{MxBounds-class}
\alias{MxBounds}
\title{MxBounds Class}
\description{
MxBounds is an S4 class. New instances of this class can
be created using the function \link{mxBounds}.
}
\details{
The MxBounds class has the following slots:
\tabular{rcl}{
\tab \tab \cr
min \tab - \tab The lower bound \cr
max \tab - \tab The upper bound \cr
parameters \tab - \tab The vector of parameter names \cr
}
The 'min' and 'max' slots hold scalar numeric values for the lower and upper bounds on the list of parameters, respectively.
Parameters may be any free parameter or parameters from an \link{MxMatrix} object. Parameters may be referenced either by name or by referring to their position in the 'spec' matrix of an \code{MxMatrix} object. To affect an estimation or optimization, an MxBounds object must be included in an \link{MxModel} object with all referenced \link{MxAlgebra} and \link{MxMatrix} objects.
Slots may be referenced with the $ symbol. See the documentation for \link[methods]{Classes} and the examples in the \link{mxBounds} document for more information.
}
\references{
The OpenMx User's guide can be found at \url{https://openmx.ssri.psu.edu/documentation/}.
}
\seealso{
\link{mxBounds} for the function that creates MxBounds objects. \link{MxMatrix} and \link{mxMatrix} for free parameter specification. More information about the OpenMx package may be found \link[=OpenMx]{here}.
}
| /man/MxBounds-class.Rd | no_license | OpenMx/OpenMx | R | false | false | 2,117 | rd | %
% Copyright 2007-2021 by the individuals mentioned in the source code history
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
\name{MxBounds-class}
\alias{MxBounds-class}
\alias{MxBounds}
\title{MxBounds Class}
\description{
MxBounds is an S4 class. New instances of this class can
be created using the function \link{mxBounds}.
}
\details{
The MxBounds class has the following slots:
\tabular{rcl}{
\tab \tab \cr
min \tab - \tab The lower bound \cr
max \tab - \tab The upper bound \cr
parameters \tab - \tab The vector of parameter names \cr
}
The 'min' and 'max' slots hold scalar numeric values for the lower and upper bounds on the list of parameters, respectively.
Parameters may be any free parameter or parameters from an \link{MxMatrix} object. Parameters may be referenced either by name or by referring to their position in the 'spec' matrix of an \code{MxMatrix} object. To affect an estimation or optimization, an MxBounds object must be included in an \link{MxModel} object with all referenced \link{MxAlgebra} and \link{MxMatrix} objects.
Slots may be referenced with the $ symbol. See the documentation for \link[methods]{Classes} and the examples in the \link{mxBounds} document for more information.
}
\references{
The OpenMx User's guide can be found at \url{https://openmx.ssri.psu.edu/documentation/}.
}
\seealso{
\link{mxBounds} for the function that creates MxBounds objects. \link{MxMatrix} and \link{mxMatrix} for free parameter specification. More information about the OpenMx package may be found \link[=OpenMx]{here}.
}
|
################################################################################
getFeatures <- function() {
  ##############################################################################
  # Load the UCI HAR feature lookup table from disk.
  # Column V2 of the returned data frame holds the feature names.
  ##############################################################################
  read.table("./data/UCI_HAR_Dataset/features.txt")
}
################################################################################
main <- function() {
  ##############################################################################
  # Build the tidy UCI HAR data set:
  #   1. label columns with the descriptive feature names,
  #   2. merge the test and training sets,
  #   3. keep only mean/std measurements,
  #   4. write the result to tidy.txt.
  ##############################################################################
  features <- getFeatures()
  headers <- features$V2
  # Appropriately label the data sets with descriptive variable names.
  test <- read.table("./data/UCI_HAR_Dataset/test/X_test.txt", col.names = headers)
  train <- read.table("./data/UCI_HAR_Dataset/train/X_train.txt", col.names = headers)
  # Merge the training and the test sets to create one data set.
  data <- rbind(test, train)
  # Extract only the measurements on the mean and standard deviation
  # for each measurement.
  measurements <- features[grep("mean|std", features$V2), ]$V2
  # Write the reduced data set out.  (A bare `data[, measurements]`
  # expression whose value was silently discarded has been removed.)
  write.table(data[, measurements], file = "tidy.txt", row.names = FALSE)
}
| /run_analysis.R | no_license | SeanPlusPlus/getdata_course_project | R | false | false | 1,130 | r | ################################################################################
getFeatures <- function() {
################################################################################
  # Read the UCI HAR feature lookup table; column V2 holds the
  # human-readable feature names used as column headers downstream.
  features <- read.table("./data/UCI_HAR_Dataset/features.txt")
  return(features)
}
################################################################################
main <- function() {
################################################################################
  # Build the tidy UCI HAR data set: label columns, merge test/train,
  # keep mean/std measurements, and write the result to tidy.txt.
  features <- getFeatures()
  headers <- features$V2
  # Appropriately labels the data set with descriptive variable names.
  test <- read.table("./data/UCI_HAR_Dataset/test/X_test.txt", col.names = headers)
  train <- read.table("./data/UCI_HAR_Dataset/train/X_train.txt", col.names = headers)
  # Merge the training and the test sets to create one data set.
  data <- rbind(test, train)
  # Extract only the measurements on the mean and standard deviation for each measurement.
  measurements <- features[grep("mean|std", features$V2),]$V2
  # NOTE(review): the next expression's value is discarded -- it has no
  # effect and looks like a leftover from interactive exploration.
  data[,measurements]
  # write this out
  write.table(data[,measurements], file = "tidy.txt", row.names = FALSE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{ndbc_munge}
\alias{ndbc_munge}
\title{Munge raw NDBC data frame}
\usage{
ndbc_munge(data)
}
\arguments{
\item{data}{a data frame}
}
\value{
a better data frame
}
\description{
Munge raw NDBC data frame
}
| /man/ndbc_munge.Rd | no_license | evmo/ndbc | R | false | true | 295 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{ndbc_munge}
\alias{ndbc_munge}
\title{Munge raw NDBC data frame}
\usage{
ndbc_munge(data)
}
\arguments{
\item{data}{a data frame}
}
\value{
a better data frame
}
\description{
Munge raw NDBC data frame
}
|
library(shiny)
library(rsconnect)
library(magrittr)
library(lubridate)
library(stringr)
library(tibble)
library(broom)
library(ggplot2)
library(gt)
library(knitr)
library(devtools)
library(foreach)
library(dplyr)
library(data.table)
library(httr)
library(scales)
library(tidyr)
options(scipen=999)
## Define the simulate function
# Discrete-time SEIR epidemic simulation extended with quarantine
# compartments (Q: susceptible quarantined, QE: exposed quarantined,
# QI: infected quarantined), hospitalisation (H), recovery (R) and
# fatalities (F).
#
# Each day the function moves mass between compartments in a fixed
# order: S->Q, S->E, E->I, (Q,E)->QE, (QE,I)->QI, (I,QI)->H,
# (I,QI,H)->R, H->F.  The update order matters: each transition uses
# the compartment values already updated earlier in the same day.
#
# Args:
#   initial:    named numeric vector of starting values, in the order
#               time, s.num, e.num, i.num, q.num, qe.num, qi.num,
#               h.num, r.num, f.num.
#   total_time: number of days to simulate (rows of the result).
#
# Returns:
#   A data frame with one row per day, the ten compartment columns,
#   plus ti.num = i.num + qi.num (total symptomatic infections).
simulate <- function(
  quar.rate = 0, ## Rate at which an individual goes from Susceptible to Quarantined
  max_quar = 0, # Quarantine peak (max share of population in quarantine)
  quar.rate.i = 1, ## Rate at which an individual goes from Infected to Quarantined Infected
  ## Transmission parameters
  lambda = 14, ## Encounters between Susceptible and Infected (symptomatic) per day
  rho_s = 0.012, ## Infection probability between Susceptible and Infected (symptomatic) => AKA rate of transmission
  rho_a = 0.012, ## Infection probability between Susceptible and Exposed
  lambda_q = 7, ## Quarantine encounters (lambda_q / lambda = efficiency in reducing contacts)
  ## Medical parameters
  prog.rate = 1/6, # Rate per day from Exposed to Infected (symptomatic)
  rec.rate = 1/14, # Rate per day from Infected (symptomatic) to Recovered
  hosp.rate = 1/100, # Rate per day from Infected to Hospitalization
  disch.rate = 1/7, # Rate per day from Hospitalization to Recovered
  fat.rate.base = 1/50, # Rate per day from Hospitalization to Fatality
  total_time,
  initial) {
  # One row per simulated day, one column per compartment.
  sim.matrix <- data.frame(matrix(NA, nrow = total_time, ncol = 10))
  colnames(sim.matrix) <- names(initial)
  # Seed day 1 from the caller-supplied initial state.
  sim.matrix$time[1] = initial[1]
  sim.matrix$s.num[1] = initial[2]
  sim.matrix$e.num[1] = initial[3]
  sim.matrix$i.num[1] = initial[4]
  sim.matrix$q.num[1] = initial[5]
  sim.matrix$qe.num[1] = initial[6]
  sim.matrix$qi.num[1] = initial[7]
  sim.matrix$h.num[1] = initial[8]
  sim.matrix$r.num[1] = initial[9]
  sim.matrix$f.num[1] = initial[10]
  for (t in 2:total_time) {
    sim.matrix$time[t] = t
    # Carry yesterday's state forward; S is clamped at zero.
    sim.matrix$s.num[t] <- max(sim.matrix$s.num[t-1],0) ## Initial S
    sim.matrix$e.num[t] <- sim.matrix$e.num[t-1] ## Initial E
    sim.matrix$i.num[t] <- sim.matrix$i.num[t-1] ## Initial I
    sim.matrix$q.num[t] <- sim.matrix$q.num[t-1] ## Initial Q
    sim.matrix$qe.num[t] <- sim.matrix$qe.num[t-1] ## Initial QE
    sim.matrix$qi.num[t] <- sim.matrix$qi.num[t-1] ## Initial QI
    sim.matrix$h.num[t] <- sim.matrix$h.num[t-1] ## Initial H
    sim.matrix$f.num[t] <- sim.matrix$f.num[t-1] ## Initial F
    sim.matrix$r.num[t] <- sim.matrix$r.num[t-1] ## Initial R
    # Mass of daily encounters generated by quarantined and
    # non-quarantined individuals (quarantined meet at rate lambda_q).
    M = lambda * (sim.matrix$s.num[t] + sim.matrix$e.num[t] + sim.matrix$i.num[t] + sim.matrix$r.num[t]) +
      lambda_q * (sim.matrix$q.num[t] + sim.matrix$qe.num[t] + sim.matrix$qi.num[t])
    # Mass of encounters generated by infectious individuals
    # (symptomatic I/QI plus asymptomatic E/QE).
    MI = ( lambda*(sim.matrix$i.num[t] + sim.matrix$e.num[t]) +
      lambda_q*(sim.matrix$qi.num[t] + sim.matrix$qe.num[t]) )
    # Probability a random encounter is with an Exposed or Infected.
    pi_I = MI / M
    # Probability the infectious contact is symptomatic (I/QI) ...
    pi_II = (lambda*sim.matrix$i.num[t] + lambda_q*sim.matrix$qi.num[t]) / MI
    # ... or asymptomatic (E/QE), conditional on it being infectious.
    pi_IE = (lambda*sim.matrix$e.num[t] + lambda_q*sim.matrix$qe.num[t]) / MI
    # Per-encounter infection probability for a Susceptible.
    alpha = pi_I*(pi_II*rho_s + pi_IE*rho_a)
    # Quarantine intake stops once the quarantined share of the living,
    # non-hospitalised population reaches max_quar.
    q_rate <- ifelse(
      sim.matrix$q.num[t]/(sum(sim.matrix[t,2:10]) - sim.matrix$f.num[t] - sim.matrix$h.num[t]) < max_quar,
      quar.rate,
      0)
    ## FROM S to Q
    trans_S_Q <- q_rate * sim.matrix$s.num[t]
    sim.matrix$s.num[t] <- sim.matrix$s.num[t] - trans_S_Q ## Update S
    sim.matrix$q.num[t] <- sim.matrix$q.num[t] + trans_S_Q ## Update Q
    ## FROM S to E (new infections among the non-quarantined)
    trans_S_E <- (alpha * lambda) * sim.matrix$s.num[t]
    sim.matrix$s.num[t] <- sim.matrix$s.num[t] - trans_S_E ## Update S
    sim.matrix$e.num[t] <- sim.matrix$e.num[t] + trans_S_E ## Update E
    ## FROM E to I (symptom onset)
    trans_E_I <- sim.matrix$e.num[t]*prog.rate
    sim.matrix$e.num[t] <- sim.matrix$e.num[t] - trans_E_I ## Update E
    sim.matrix$i.num[t] <- sim.matrix$i.num[t] + trans_E_I ## Update I
    ## FROM (Q, E) to QE (infection inside quarantine; exposed entering it)
    trans_Q_QE <- (alpha * lambda_q) * sim.matrix$q.num[t]
    trans_E_QE <- sim.matrix$e.num[t] * q_rate
    sim.matrix$q.num[t] <- sim.matrix$q.num[t] - trans_Q_QE ## Update Q
    sim.matrix$e.num[t] <- sim.matrix$e.num[t] - trans_E_QE ## Update E
    sim.matrix$qe.num[t] <- sim.matrix$qe.num[t] + trans_Q_QE + trans_E_QE ## Update QE
    ## FROM (QE, I) to QI (onset inside quarantine; isolation of symptomatic)
    trans_QE_QI <- sim.matrix$qe.num[t]*prog.rate
    trans_I_QI <- sim.matrix$i.num[t]*quar.rate.i
    sim.matrix$qe.num[t] <- sim.matrix$qe.num[t] - trans_QE_QI ## Update QE
    sim.matrix$i.num[t] <- sim.matrix$i.num[t] - trans_I_QI ## Update I
    sim.matrix$qi.num[t] <- sim.matrix$qi.num[t] + trans_QE_QI + trans_I_QI ## Update QI
    ## FROM (I, QI) to H (hospitalisation)
    trans_I_H <- sim.matrix$i.num[t]*hosp.rate
    trans_QI_H <- sim.matrix$qi.num[t]*hosp.rate
    sim.matrix$i.num[t] <- sim.matrix$i.num[t] - trans_I_H ## Update I
    sim.matrix$qi.num[t] <- sim.matrix$qi.num[t] - trans_QI_H ## Update QI
    sim.matrix$h.num[t] <- sim.matrix$h.num[t] + trans_I_H + trans_QI_H ## Update H
    ## FROM (I, QI, H) to R (recovery)
    trans_I_R <- sim.matrix$i.num[t]*rec.rate
    trans_QI_R <- sim.matrix$qi.num[t]*rec.rate
    trans_H_R <- sim.matrix$h.num[t]*disch.rate
    sim.matrix$i.num[t] <- sim.matrix$i.num[t] - trans_I_R ## Update I
    sim.matrix$qi.num[t] <- sim.matrix$qi.num[t] - trans_QI_R ## Update QI
    sim.matrix$h.num[t] <- sim.matrix$h.num[t] - trans_H_R ## Update H
    sim.matrix$r.num[t] <- sim.matrix$r.num[t] + trans_I_R + trans_QI_R + trans_H_R ## Update R
    ## FROM H to F (fatalities)
    trans_H_F <- sim.matrix$h.num[t]*fat.rate.base
    sim.matrix$h.num[t] <- sim.matrix$h.num[t] - trans_H_F ## Update H
    sim.matrix$f.num[t] <- sim.matrix$f.num[t] + trans_H_F ## Update F
  }
  ## Total symptomatic infections (quarantined + free).
  sim.matrix$ti.num = (sim.matrix$i.num + sim.matrix$qi.num)
  return(sim.matrix)
}
# Stacked-area plot of simulated prevalence: mildly infected,
# hospitalised and fatalities over time.
#
# Args:
#   sim_data: data frame returned by simulate() (needs time, ti.num,
#             h.num, f.num columns).
#   x_limit:  last day to include on the x axis.
plot1 <- function(sim_data, x_limit) {
  ## Reshape the prevalence columns to long format for ggplot.
  baseline_plot <- sim_data %>%
    select(time, ti.num, h.num, f.num) %>%
    filter(time <= x_limit) %>%
    pivot_longer(-c(time), names_to = "Groups",
                 values_to = "count")
  # Fix the stacking order of the compartments.
  baseline_plot$Groups <- factor(baseline_plot$Groups, levels =
                                   c("ti.num", "h.num", "f.num"))
  ## Standard colours/labels per compartment (e.num entries kept for
  ## potential reuse; unused levels are ignored by the manual scales).
  compcols <- c(ti.num = "orange",
                h.num = "red",
                f.num = "black",
                e.num = "cyan")
  complabels <- c(ti.num = "Mildly Infected",
                  h.num = "Severely Infected, Requires Hospitalisation",
                  f.num = "Fatalities",
                  e.num = "Exposed (asymptomatic)")
  baseline_plot %>% ggplot(aes(x = time, y = count, colour = Groups, fill = Groups)) +
    geom_area(size = 1.25, alpha = 0.7) +
    scale_colour_manual(name = "", values = compcols,
                        labels = complabels) +
    scale_fill_manual(name = "", values = compcols,
                      labels = complabels) +
    # `labels` spelled out: the original wrote `label =`, relying on
    # partial argument matching, which current ggplot2 warns about.
    scale_y_continuous(labels = comma) +
    labs(title = "Simulation Results",
         x = "Days since beginning of simulation", y = "Prevalence (persons)") +
    theme(legend.position = "bottom")
}
# Line plot of hospitalisations and fatalities against ICU capacity,
# with the hospitalisation curve filled as an area.
#
# Args:
#   sim_data: data frame returned by simulate() (needs time, h.num,
#             f.num columns).
#   x_limit:  last day to include on the x axis.
#   icu_beds: ICU bed count, drawn as a dotted horizontal reference line.
plot2 <- function(sim_data, x_limit, icu_beds) {
  ## Add the (constant) ICU capacity as a plottable column.
  sim_data$icu <- icu_beds
  baseline_plot <- sim_data %>% # lines: hospitalised, fatalities, ICU
    select(time, h.num, f.num, icu) %>%
    filter(time <= x_limit) %>%
    pivot_longer(-c(time), names_to = "Groups",
                 values_to = "count")
  area_plot <- sim_data %>% # filled area: hospitalised only
    select(time, h.num) %>%
    filter(time <= x_limit) %>%
    pivot_longer(-c(time), names_to = "Groups",
                 values_to = "count")
  ## Colours and legend labels per series.
  compcols <- c(h.num = "red",
                f.num = "black",
                icu = "red")
  complabels <- c(h.num = "Requires Hospitalisation",
                  f.num = "Fatalities",
                  icu = "Number of ICU beds")
  ggplot() +
    geom_line(data = baseline_plot,
              aes(x = time, y = count, linetype = Groups, col = Groups),
              size = 1, alpha = 0.7) +
    # `fill = "red"` is set OUTSIDE aes(): the original put it inside
    # aes(), which maps the constant string through the default discrete
    # fill scale (producing the default hue, not red).
    geom_area(data = area_plot,
              aes(x = time, y = count, linetype = Groups, col = Groups),
              fill = "red",
              size = 1, alpha = 0.7, show.legend = FALSE) +
    scale_colour_manual(name = "", values = compcols,
                        labels = complabels) +
    scale_linetype_manual(name = "", values = c(h.num = "solid", f.num = "solid", icu = "dotted"),
                          labels = complabels) +
    # `labels` spelled out (was `label =`, partial argument matching).
    scale_y_continuous(labels = comma) +
    labs(title = "Simulation Results",
         x = "Days since beginning of epidemic", y = "Prevalence (persons)") +
    theme(legend.position = "bottom")
}
# Back-test: realized São Paulo COVID fatalities (SEADE data) against
# the model's projected fatalities for the quarantine scenario.
#
# Args:
#   cases:     case-count threshold; day 1 is the first day with more
#              than `cases` confirmed cases.
#   sim_data1: no-quarantine simulation (accepted for interface
#              compatibility; not plotted).
#   sim_data2: quarantine simulation, compared against realized data.
#   x_lim:     number of days to show.
plot_backtest <- function(cases, sim_data1, sim_data2, x_lim) {
  sim_quar <- sim_data2
  # Download the SEADE data set and keep daily state-wide cumulative
  # deaths, indexed from the first day above the case threshold.
  load_data_sp <- function(case_initial) {
    raw <- fread("https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/dados_covid_sp.csv")
    dt <- raw[, c("datahora", "casos", "obitos") ]
    dt <- dt[,lapply(.SD,sum),by="datahora"]
    dt <- dt[casos > case_initial]
    dt <- dt[,-c("datahora", "casos")]
    dt <- dt[, time := sequence(.N)]
    setcolorder(dt, c("time", "obitos"))
    setnames(dt, "obitos", "f.num")
    return(dt)
  }
  dt_sp <- load_data_sp(case_initial = cases)
  dt_sp <- dt_sp[,"region" := "SP"]
  day_limit <- x_lim ## cut-off when pulling data from the model
  model_quar <- sim_quar %>% select(time, f.num) %>% filter(time <= day_limit)
  model_quar <- setDT(model_quar)
  model_quar <- model_quar[, "region" := "model_quar"]
  # Anchor simulated deaths to the realized death count on day 1.
  model_quar$f.num <- model_quar$f.num + dt_sp$f.num[1]
  # (The original also built an unused `model` table from sim_data1;
  # it never entered the plot and has been removed.)
  dt_backtest <- rbind(dt_sp, model_quar, fill = T)
  ## PLOT: PREDICTED VS REALIZED: SP
  group.colors <- c(SP = "black", model = "blue", model_quar = "blue")
  complabels <- c(model = "Sem quarentena", model_quar = "Simulated results")
  y_lim <- max(dt_backtest[ time < day_limit, f.num])
  # The y axis is controlled solely by scale_y_continuous(); the
  # original additionally called ylim(), which ggplot2 warned about
  # ("Scale for y is already present") and discarded.
  ggplot(data = dt_backtest, aes(x = time, y = f.num)) +
    geom_line(aes(color = factor(region))) + geom_point(shape=1, alpha = 0.5) +
    xlim(1, day_limit) +
    scale_color_manual(values=group.colors, labels = complabels) +
    scale_y_continuous(labels = comma, limits=c(0, y_lim)) +
    theme(legend.title=element_blank()) +
    labs(title = "Projetado x Realizado, Estado de SP",
         x = "Days since beginning of simulation", y = "Deaths")
}
# Compare the GDP impact of the quarantine scenario against the
# baseline (no-quarantine) scenario.
#
# Assumptions (see references in the UI): a quarantined healthy worker
# produces 50% of normal output; infected, hospitalised and deceased
# workers produce nothing.
#
# Args:
#   pop:       total population used to convert counts into shares.
#   x_limit:   last simulation day to include.
#   sim_data1: baseline simulation (no quarantine).
#   sim_data2: quarantine simulation.
economy <- function(pop, x_limit, sim_data1, sim_data2) {
  population <- pop
  # Daily GDP index (1 = full output) for one simulation run.  Factored
  # into a helper: the original duplicated this block per scenario.
  gdp_series <- function(sim_data) {
    d <- sim_data %>%
      select(time, i.num, h.num, f.num, q.num, qi.num) %>%
      filter(time <= x_limit)
    d$gdp <- 1 -
      (d$qi.num + d$i.num + d$h.num + d$f.num) / population -
      0.5 * d$q.num / population
    d %>% select(time, gdp)
  }
  # (A dead `economy <- data.frame(economy_base$gdp)` assignment that
  # was immediately overwritten has been removed.)
  economy <- gdp_series(sim_data1)
  economy <- cbind(economy, gdp_series(sim_data2)$gdp)
  names(economy)[2] <- "gdp_base"
  names(economy)[3] <- "gdp_sim"
  ## Long format for plotting.  The original hard-coded a 360-day
  ## filter here, silently ignoring x_limit; both filters now agree.
  economy_plot_df <- economy %>%
    select(time, gdp_base, gdp_sim) %>%
    filter(time <= x_limit) %>%
    pivot_longer(-c(time), names_to = "Groups",
                 values_to = "count")
  # Standard colours/labels per scenario.
  compcols <- c(gdp_base = "black", gdp_sim = "blue")
  complabels <- c(gdp_base = "Baseline", gdp_sim = "Quarantine")
  economy_plot_df %>% ggplot(aes(x = time, y = count, colour = Groups)) +
    geom_line(size = 1.25, alpha = 0.7) +
    scale_colour_manual(values = compcols,
                        labels = complabels) +
    labs(title = "GDP Impact: Simulation vs Non-Quarantine",
         x = "Days since beginning of epidemic", y = "GDP") +
    theme(legend.position = "bottom")
}
# Define UI for app that simulates and draws the curves ----
# Shiny UI: title + main prevalence plot on top, then three columns of
# controls (medical parameters, quarantine/population parameters, and
# the ICU/economy plots), followed by the SP back-test section and
# reference links.
ui <- shinyUI(fluidPage(
  ## Google Tag (remove this or substitute the html file with your own google tag)
  tags$head(tags$script(HTML("google-analytics.html"))),
  # App title ----
  titlePanel("Simulador: SEIR + Quarentena + Economia"),
  # Main prevalence plot and its x-axis range control.
  plotOutput(outputId = "plot1"),
  fluidRow(column(4, numericInput(inputId = "input_xaxis1",
                                  label = "Input max days in X axis",
                                  min = 1,
                                  max = 720,
                                  value = 360))),
  # Run the simulation / restore slider defaults.
  actionButton("go", "Calculate", style="color: white; background-color: blue"),
  actionButton("reset", "Reset parameters"),
  # Sidebar panel for inputs ----
  fluidRow(
    # Column 1: transmission and medical parameters.
    column(4,
           h4(textOutput(outputId = "r0"), style="color: blue"),
           h4(textOutput(outputId = "caption1")),
           sliderInput(inputId = "input_lambda",
                       label = "Daily encounters:",
                       min = 1,
                       max = 50,
                       value = 14),
           sliderInput(inputId = "input_rho",
                       label = "Infection probability (%), given encounter:",
                       min = 0.1,
                       max = 5,
                       step = 0.1,
                       value = 1.2,
                       round = T),
           sliderInput(inputId = "input_prog",
                       label = "Number of days: Exposed to Infected",
                       min = 1,
                       max = 20,
                       step = 0.1,
                       value = 5.2,
                       round = T),
           sliderInput(inputId = "input_rec",
                       label = "Number of days: Infected to Recovered",
                       min = 1,
                       max = 20,
                       step = 0.1,
                       value = 14,
                       round = T),
           sliderInput(inputId = "input_hosp",
                       label = "Probability of hospitalization, given Infection (%)",
                       min = 0.1,
                       max = 10,
                       step = 0.1,
                       value = 1,
                       round = T),
           sliderInput(inputId = "input_hosprec",
                       label = "Number of days: Hospitalization to Recovered",
                       min = 1,
                       max = 25,
                       step = 1,
                       value = 7,
                       round = T),
           sliderInput(inputId = "input_fat",
                       label = "Probability of Fatality, given Hospitalization (%)",
                       min = 0.1,
                       max = 20,
                       step = 0.1,
                       value = 2,
                       round = T)
    ),
    # Column 2: quarantine and population parameters.
    column(4,
           h4(textOutput(outputId = "r0_quar"), style="color: blue"),
           h4(textOutput(outputId = "caption2")),
           sliderInput(inputId = "input_lambda_q",
                       label = "Daily encounters (in quarantine):",
                       min = 1,
                       max = 50,
                       value = 7),
           sliderInput(inputId = "input_quar",
                       label = "Daily quarantine rate (%)",
                       min = 0,
                       max = 10,
                       step = .5,
                       value = 3,
                       round = T),
           sliderInput(inputId = "input_quar.i",
                       label = "Infected to Quarantine rate (%)",
                       min = 0,
                       max = 100,
                       step = 1,
                       value = 100,
                       round = T),
           sliderInput(inputId = "input_maxquar",
                       label = "Maximum quarantined population (%)",
                       min = 0,
                       max = 100,
                       step = 1,
                       value = 50,
                       round = T),
           h4(textOutput(outputId = "caption3")),
           numericInput(inputId = "input_pop",
                        label = "Population",
                        value = 44000000),
           numericInput(inputId = "input_popquar",
                        label = "Initial population Quarantined",
                        value = 0),
           numericInput(inputId = "input_popinfa",
                        label = "Initial population Exposed",
                        value = 5000),
           numericInput(inputId = "input_popinf",
                        label = "Initial population Infected",
                        value = 2500),
           numericInput(inputId = "input_icu",
                        label = "Number of ICU beds",
                        value = 100000)
    ),
    # Column 3: ICU-capacity plot and GDP-impact plot.
    column(4,
           plotOutput(outputId = "plot2"),
           plotOutput(outputId = "plot3")),
    textOutput(outputId = "economy_desc")
  ),
  # Back-test section: projected vs realized fatalities for SP state.
  h3(textOutput(outputId = "caption4")),
  plotOutput(outputId = "plot4"),
  fluidRow(column(4, numericInput(inputId = "input_cases",
                                  label = "Escolha numero de casos iniciais",
                                  min = 1,
                                  max = 10000000,
                                  value = 1)),
           column(8,
                  numericInput(inputId = "input_xaxis",
                               label = "Ajuste numero maximo de dias no Eixo X",
                               min = 1,
                               max = 720,
                               value = 120))
  ),
  # Data source note and reference links (rendered by the server).
  textOutput(outputId = "caption5"),
  uiOutput("tab0"),
  h3(textOutput(outputId = "caption6")),
  uiOutput("tab1"),
  uiOutput("tab2")
))
# Shiny server: wires the UI inputs to the simulate() engine and the
# plotting helpers.  Two simulation runs are kept reactive: one with
# the user's quarantine settings and one forced to zero quarantine
# (the baseline used for the GDP comparison).
server <- function(input, output, session) {
  # 0. Reset sliders to initial values after clicking RESET
  observeEvent(input$reset,{
    updateSliderInput(session,'input_lambda',value = 14)
    updateSliderInput(session,'input_rho',value = 1.2)
    updateSliderInput(session,'input_prog',value = 5.2)
    updateSliderInput(session,'input_rec',value = 14)
    updateSliderInput(session,'input_hosp',value = 1)
    updateSliderInput(session,'input_hosprec',value = 7)
    updateSliderInput(session,'input_fat',value = 2)
    updateSliderInput(session,'input_quar',value = 3)
    updateSliderInput(session,'input_quar.i',value = 100)
    updateSliderInput(session,'input_maxquar',value = 50)
    updateSliderInput(session,'input_lambda_q',value = 7)
  })
  # 1. Re-run the simulations after clicking the Calculate button
  #    (ignoreNULL = FALSE also runs them once at startup).
  #    Percent-style inputs are divided by 100; day-count inputs are
  #    inverted into daily rates.
  sim_data_quar <- eventReactive(input$go, {
    simulate(
      initial = c(
        time = 1,
        s.num = input$input_pop - input$input_popquar - input$input_popinf - input$input_popinfa ,
        e.num = input$input_popinfa,
        i.num = input$input_popinf,
        q.num = input$input_popquar,
        qe.num = 0,
        qi.num = 0,
        h.num = 0,
        r.num = 0,
        f.num = 0),
      total_time = 720,
      ## Parameters
      lambda = input$input_lambda,
      rho_s = input$input_rho/100,
      rho_a = input$input_rho/100,
      prog.rate = 1/input$input_prog,
      rec.rate = 1/input$input_rec,
      hosp.rate = input$input_hosp/100,
      disch.rate = 1/input$input_hosprec,
      fat.rate.base = input$input_fat/100,
      quar.rate = input$input_quar/100,
      quar.rate.i = input$input_quar.i/100,
      max_quar = input$input_maxquar/100,
      lambda_q = input$input_lambda_q
    )
  }, ignoreNULL = FALSE)
  # Baseline run: same parameters but quarantine rates forced to zero.
  sim_data_noquar <- eventReactive(input$go, {
    simulate(initial = c(
      time = 1,
      s.num = input$input_pop,
      e.num = input$input_popinfa,
      i.num = input$input_popinf,
      q.num = 0,
      qe.num = 0,
      qi.num = 0,
      h.num = 0,
      r.num = 0,
      f.num = 0),
      total_time = 720,
      ## Parameters
      lambda = input$input_lambda,
      rho_s = input$input_rho/100,
      rho_a = input$input_rho/100,
      prog.rate = 1/input$input_prog,
      rec.rate = 1/input$input_rec,
      hosp.rate = input$input_hosp/100,
      disch.rate = 1/input$input_hosprec,
      fat.rate.base = input$input_fat/100,
      quar.rate = 0,
      quar.rate.i = 0,
      max_quar = input$input_maxquar/100,
      lambda_q = input$input_lambda_q)
  }, ignoreNULL = FALSE)
  # 2. Outputs:
  # Basic-reproduction-number display for the free and quarantined
  # contact rates, recomputed from the current slider values.
  output$r0 <- renderText({ paste("R0 = ",
                                  round( ((input$input_rho/100)*input$input_lambda/(1/input$input_prog))*
                                           (1+(1/input$input_prog)/(1/input$input_rec +
                                                                      input$input_hosp/100*(1-1/input$input_hosprec)*input$input_fat/100 )), 3)) })
  output$r0_quar <- renderText({ paste("Quarantine R0 = ",
                                       round( ((input$input_rho/100)*input$input_lambda_q/(1/input$input_prog))*
                                                (1+(1/input$input_prog)/(1/input$input_rec +
                                                                           input$input_hosp/100*(1-1/input$input_hosprec)*input$input_fat/100 )), 3)) })
  # Static section captions.
  output$caption1 <- renderText({ "Medical Parameters" })
  output$caption2 <- renderText({ "Quarantine Parameters" })
  output$caption3 <- renderText({ "Population Parameters" })
  output$caption4 <- renderText({ "Comparativo fatalidades projetadas x realizadas para o estado de SP:" })
  # Prevalence, ICU-capacity, GDP and back-test plots.
  output$plot1 <- renderPlot({
    plot1(x_limit = input$input_xaxis1 ,
          sim_data = sim_data_quar())
  })
  output$plot2 <- renderPlot({
    plot2(x_limit = input$input_xaxis1,
          sim_data = sim_data_quar(), icu_beds = input$input_icu)
  })
  output$plot3 <- renderPlot({
    economy(x_limit = input$input_xaxis1,
            pop = input$input_pop,
            sim_data1 = sim_data_noquar(),
            sim_data2 = sim_data_quar())
  })
  output$plot4 <- renderPlot({
    plot_backtest(cases = input$input_cases , sim_data1 = sim_data_noquar(),
                  sim_data2 = sim_data_quar(), input$input_xaxis)
  })
  output$caption5 <- renderText({ "Dados de SP são da Secretaria de Estado da Saúde de São Paulo (SES)" })
  output$caption6 <- renderText({ "References" })
  # Reference links rendered as tagLists in the UI.
  url1 <- a("Tim Churches Health Data Science Blog", href="https://timchurches.github.io/blog/posts/2020-03-18-modelling-the-effects-of-public-health-interventions-on-covid-19-transmission-part-2/")
  url2 <- a("An SEIR Infectious Disease Model with Testing and Conditional Quarantine", href = "https://www.nber.org/papers/w26901")
  url3 <- a("post", href="http://academicosdobar.site/posts/2020-06-03-simulador-covid-quarentena-economia/")
  output$tab0 <- renderUI({
    tagList("Veja comentários acerca da incerteza da previsão neste " , url3)
  })
  output$tab1 <- renderUI({
    tagList("SEIR model:" , url1)
  })
  output$tab2 <- renderUI({
    tagList("Quarantine and GDP calculation: Berger, Herkenhoff & Mongey (2020)" , url2)
  })
  output$economy_desc <- renderText({
    "Comments: A worker in quarantine is 50% less productive than a non-quarantine worker. An infected worker does not produce (see references)."
  })
}
shinyApp(ui, server) | /seir_simulator/app.R | no_license | alexandrepecora/shiny_app | R | false | false | 27,649 | r |
library(shiny)
library(rsconnect)
library(magrittr)
library(lubridate)
library(stringr)
library(tibble)
library(broom)
library(ggplot2)
library(gt)
library(knitr)
library(devtools)
library(foreach)
library(dplyr)
library(data.table)
library(httr)
library(scales)
library(tidyr)
options(scipen=999)
## Define the simulate function
simulate <- function(
quar.rate = 0, ## Rate at which an individual goes from Susceptible to Quarantined
max_quar = 0, # Quarantine peak
quar.rate.i = 1, ## Rate at which an individual goes from Infected to Quarantined Infected
## Transmission paratemters
lambda = 14, ## Encounters between Susceptible and Infected (symptomatic) per day
rho_s = 0.012, ## Infection probability between Susceptible and Infected (symptomatic) => AKA rate of transmission
rho_a = 0.012, ## Infection probability between Susceptible and Exposed
lambda_q = 7, ## Quarantine encounters (lambda_q / lambda = efficiency in reducing contacts)
## Medical parameters
prog.rate = 1/6, # Rate per day from Exposed to Infected (symptomatic)
rec.rate = 1/14, # Rate per day from Infected (symptomatic) to Recovered
hosp.rate = 1/100, # Rate per day from Infected to Hospitalization
disch.rate = 1/7, # Rate per day from Hospitalization to Recovered
fat.rate.base = 1/50, # Rate per day from Hospitalization to Fatality
total_time,
initial) {
sim.matrix <- data.frame(matrix(NA, nrow = total_time, ncol = 10))
colnames(sim.matrix) <- names(initial)
sim.matrix$time[1] = initial[1]
sim.matrix$s.num[1] = initial[2]
sim.matrix$e.num[1] = initial[3]
sim.matrix$i.num[1] = initial[4]
sim.matrix$q.num[1] = initial[5]
sim.matrix$qe.num[1] = initial[6]
sim.matrix$qi.num[1] = initial[7]
sim.matrix$h.num[1] = initial[8]
sim.matrix$r.num[1] = initial[9]
sim.matrix$f.num[1] = initial[10]
for (t in 2:total_time) {
sim.matrix$time[t] = t
sim.matrix$s.num[t] <- max(sim.matrix$s.num[t-1],0) ## Initial S
sim.matrix$e.num[t] <- sim.matrix$e.num[t-1] ## Initial E
sim.matrix$i.num[t] <- sim.matrix$i.num[t-1] ## Initial I
sim.matrix$q.num[t] <- sim.matrix$q.num[t-1] ## Initial Q
sim.matrix$qe.num[t] <- sim.matrix$qe.num[t-1] ## Initial QE
sim.matrix$qi.num[t] <- sim.matrix$qi.num[t-1] ## Initial QI
sim.matrix$h.num[t] <- sim.matrix$h.num[t-1] ## Initial H
sim.matrix$f.num[t] <- sim.matrix$f.num[t-1] ## Initial F
sim.matrix$r.num[t] <- sim.matrix$r.num[t-1] ## Initial R
# Mass of quarantined and non quarantined (M)
M = lambda * (sim.matrix$s.num[t] + sim.matrix$e.num[t] + sim.matrix$i.num[t] + sim.matrix$r.num[t]) +
lambda_q * (sim.matrix$q.num[t] + sim.matrix$qe.num[t] + sim.matrix$qi.num[t])
# MASS of infected (symptomatic (I) + asymptomatic (E) )
MI = ( lambda*(sim.matrix$i.num[t] + sim.matrix$e.num[t]) +
lambda_q*(sim.matrix$qi.num[t] + sim.matrix$qe.num[t]) )
# Identifying probability of Exposed or Infected
pi_I = MI / M
# Identifying probability of Infected (symptomatic), given Exposure
pi_II = (lambda*sim.matrix$i.num[t] + lambda_q*sim.matrix$qi.num[t]) / MI
# Identifying probability of Non Symptomatic (E), given Exposure
pi_IE = (lambda*sim.matrix$e.num[t] + lambda_q*sim.matrix$qe.num[t]) / MI
# Identifying probability of Infection, conditional on a random meeting:
alpha = pi_I*(pi_II*rho_s + pi_IE*rho_a)
# Verifying if quarantine reached the peak
q_rate <- ifelse(
sim.matrix$q.num[t]/(sum(sim.matrix[t,2:10]) - sim.matrix$f.num[t] - sim.matrix$h.num[t]) < max_quar,
quar.rate,
0)
## FROM S to Q
trans_S_Q <- q_rate * sim.matrix$s.num[t]
sim.matrix$s.num[t] <- sim.matrix$s.num[t] - trans_S_Q ## Update S
sim.matrix$q.num[t] <- sim.matrix$q.num[t] + trans_S_Q ## Update Q
## FROM S to E
trans_S_E <- (alpha * lambda) * sim.matrix$s.num[t]
sim.matrix$s.num[t] <- sim.matrix$s.num[t] - trans_S_E ## Update S
sim.matrix$e.num[t] <- sim.matrix$e.num[t] + trans_S_E ## Update E
## FROM E to I
trans_E_I <- sim.matrix$e.num[t]*prog.rate
sim.matrix$e.num[t] <- sim.matrix$e.num[t] - trans_E_I ## Update E
sim.matrix$i.num[t] <- sim.matrix$i.num[t] + trans_E_I ## Update I
## FROM (Q, E) to QE
trans_Q_QE <- (alpha * lambda_q) * sim.matrix$q.num[t]
trans_E_QE <- sim.matrix$e.num[t] * q_rate
sim.matrix$q.num[t] <- sim.matrix$q.num[t] - trans_Q_QE ## Update Q
sim.matrix$e.num[t] <- sim.matrix$e.num[t] - trans_E_QE ## Update E
sim.matrix$qe.num[t] <- sim.matrix$qe.num[t] + trans_Q_QE + trans_E_QE ## Update QE
## FROM (QE, I) to QI
trans_QE_QI <- sim.matrix$qe.num[t]*prog.rate
trans_I_QI <- sim.matrix$i.num[t]*quar.rate.i
sim.matrix$qe.num[t] <- sim.matrix$qe.num[t] - trans_QE_QI ## Update QE
sim.matrix$i.num[t] <- sim.matrix$i.num[t] - trans_I_QI ## Update I
sim.matrix$qi.num[t] <- sim.matrix$qi.num[t] + trans_QE_QI + trans_I_QI ## Update QI
## FROM (I, QI) to H
trans_I_H <- sim.matrix$i.num[t]*hosp.rate
trans_QI_H <- sim.matrix$qi.num[t]*hosp.rate
sim.matrix$i.num[t] <- sim.matrix$i.num[t] - trans_I_H ## Update I
sim.matrix$qi.num[t] <- sim.matrix$qi.num[t] - trans_QI_H ## Update QI
sim.matrix$h.num[t] <- sim.matrix$h.num[t] + trans_I_H + trans_QI_H ## Update H
## FROM (I, QI, H) to R
trans_I_R <- sim.matrix$i.num[t]*rec.rate
trans_QI_R <- sim.matrix$qi.num[t]*rec.rate
trans_H_R <- sim.matrix$h.num[t]*disch.rate
sim.matrix$i.num[t] <- sim.matrix$i.num[t] - trans_I_R ## Update I
sim.matrix$qi.num[t] <- sim.matrix$qi.num[t] - trans_QI_R ## Update QI
sim.matrix$h.num[t] <- sim.matrix$h.num[t] - trans_H_R ## Update H
sim.matrix$r.num[t] <- sim.matrix$r.num[t] + trans_I_R + trans_QI_R + trans_H_R ## Update R
## FROM H to F
trans_H_F <- sim.matrix$h.num[t]*fat.rate.base
sim.matrix$h.num[t] <- sim.matrix$h.num[t] - trans_H_F ## Update H
sim.matrix$f.num[t] <- sim.matrix$f.num[t] + trans_H_F ## Update F
}
## Adding up total infections
sim.matrix$ti.num = (sim.matrix$i.num + sim.matrix$qi.num)
return(sim.matrix)
}
plot1 <- function(sim_data, x_limit) {
  ## Stacked-area plot of prevalence over time for one simulation run.
  ## 'sim_data': data frame returned by simulate(); needs columns
  ##             time, ti.num, h.num, f.num.
  ## 'x_limit':  last day (inclusive) shown on the x axis.
  ## Returns a ggplot object.
  # Reshape the plotted prevalence columns to long format.
  baseline_plot <- sim_data %>%
    select(time, ti.num, h.num, f.num) %>%
    filter(time <= x_limit) %>%
    pivot_longer(-c(time), names_to = "Groups",
                 values_to = "count")
  # Fix the stacking order of the compartments.
  baseline_plot$Groups <- factor(baseline_plot$Groups, levels =
                                   c("ti.num", "h.num", "f.num"))
  ## Standard colours/labels per compartment (the e.num entries are unused by
  ## this plot but kept so the palette matches the rest of the app).
  compcols <- c(ti.num = "orange",
                h.num = "red",
                f.num = "black",
                e.num = "cyan")
  complabels <- c(ti.num = "Mildly Infected",
                  h.num = "Severely Infected, Requires Hospitalisation",
                  f.num = "Fatalities",
                  e.num = "Exposed (asymptomatic)")
  baseline_plot %>% ggplot(aes(x = time, y = count, colour = Groups, fill = Groups)) +
    geom_area(size = 1.25, alpha = 0.7) +
    scale_colour_manual(name = "", values = compcols,
                        labels = complabels) +
    scale_fill_manual(name = "", values = compcols,
                      labels = complabels) +
    # 'labels' spelled out: the original relied on partial matching of 'label'.
    scale_y_continuous(labels = comma) +
    labs(title = "Simulation Results",
         x = "Days since beginning of simulation", y = "Prevalence (persons)") +
    theme(legend.position = "bottom")
}
plot2 <- function(sim_data, x_limit, icu_beds) {
  ## Line plot of hospitalisations and fatalities against ICU capacity.
  ## 'sim_data': data frame returned by simulate(); needs time, h.num, f.num.
  ## 'x_limit':  last day (inclusive) shown on the x axis.
  ## 'icu_beds': ICU bed count, drawn as a dotted horizontal reference line.
  ## Returns a ggplot object.
  sim_data$icu <- icu_beds
  # All three plotted series in long format.
  baseline_plot <- sim_data %>%
    select(time, h.num, f.num, icu) %>%
    filter(time <= x_limit) %>%
    pivot_longer(-c(time), names_to = "Groups",
                 values_to = "count")
  # Hospitalisations only, shaded as an area under the curve.
  area_plot <- sim_data %>%
    select(time, h.num) %>%
    filter(time <= x_limit) %>%
    pivot_longer(-c(time), names_to = "Groups",
                 values_to = "count")
  ## Standard colours/labels per series.
  compcols <- c(h.num = "red",
                f.num = "black",
                icu = "red")
  complabels <- c(h.num = "Requires Hospitalisation",
                  f.num = "Fatalities",
                  icu = "Number of ICU beds")
  ggplot() +
    geom_line(data = baseline_plot,
              aes(x = time, y = count, linetype = Groups, col = Groups), size = 1, alpha = 0.7) +
    # NOTE(review): fill = "red" sits inside aes(), so it is mapped through the
    # default discrete fill scale rather than drawn as literal red; kept as-is
    # to preserve the current rendering -- confirm the intended colour before
    # moving it outside aes().
    geom_area(data = area_plot,
              aes(x = time, y = count, linetype = Groups, col = Groups, fill = "red"),
              size = 1, alpha = 0.7, show.legend = FALSE) +
    scale_colour_manual(name = "", values = compcols,
                        labels = complabels) +
    scale_linetype_manual(name = "", values = c(h.num = "solid", f.num = "solid", icu = "dotted"),
                          labels = complabels) +
    # 'labels' spelled out: the original relied on partial matching of 'label'.
    scale_y_continuous(labels = comma) +
    labs(title = "Simulation Results",
         x = "Days since beginning of epidemic", y = "Prevalence (persons)") +
    theme(legend.position = "bottom")
}
plot_backtest <- function(cases, sim_data1, sim_data2, x_lim) {
  ## Backtest plot: projected vs realised COVID-19 deaths for Sao Paulo state.
  ## 'cases':     the series is aligned to the first day on which the official
  ##              data exceeds this many cases.
  ## 'sim_data1': baseline (no-quarantine) simulation; not plotted by the
  ##              current implementation, kept for interface compatibility.
  ## 'sim_data2': quarantine simulation whose fatalities are compared to data.
  ## 'x_lim':     number of days shown.
  ## Returns a ggplot object. Requires network access (downloads the SEADE-SP
  ## dataset).
  sim_quar <- sim_data2
  # Download and aggregate the official SP dataset: cumulative deaths indexed
  # by days since the outbreak passed 'case_initial' cases.
  load_data_sp <- function(case_initial) {
    raw <- fread("https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/dados_covid_sp.csv")
    dt <- raw[, c("datahora", "casos", "obitos") ]
    dt <- dt[,lapply(.SD,sum),by="datahora"]
    dt <- dt[casos > case_initial]
    dt <- dt[,-c("datahora", "casos")]
    dt <- dt[, time := sequence(.N)]
    setcolorder(dt, c("time", "obitos"))
    setnames(dt, "obitos", "f.num")
    return(dt)
  }
  dt_sp <- load_data_sp(case_initial = cases)
  dt_sp <- dt_sp[,"region" := "SP"]
  day_limit <- x_lim ## set limit day when pulling data from model
  model_quar <- sim_quar %>% select(time, f.num) %>% filter(time <= day_limit)
  model_quar <- setDT(model_quar)
  model_quar <- model_quar[, "region" := "model_quar"]
  # Offset the simulated deaths so both series start from the same level.
  model_quar$f.num <- model_quar$f.num + dt_sp$f.num[1]
  dt_backtest <- rbind(dt_sp, model_quar, fill = TRUE)
  ## PLOT: PREDICTED VS REALIZED: SP
  group.colors <- c(SP = "black", model = "blue", model_quar = "blue")
  complabels <- c(model = "Sem quarentena", model_quar = "Simulated results")
  y_lim <- max(dt_backtest[ time < day_limit, f.num])
  # The y axis is limited once, via scale_y_continuous(); the original also
  # called ylim(), which the later scale silently overrode.
  ggplot(data = dt_backtest, aes(x = time, y = f.num)) +
    geom_line(aes(color = factor(region))) + geom_point(shape = 1, alpha = 0.5) +
    xlim(1, day_limit) + scale_color_manual(values = group.colors, labels = complabels) +
    scale_y_continuous(labels = comma, limits = c(0, y_lim)) +
    theme(legend.title = element_blank()) +
    labs(title = "Projetado x Realizado, Estado de SP",
         x = "Days since beginning of simulation", y = "Deaths")
}
economy <- function(pop, x_limit, sim_data1, sim_data2) {
  ## Plots the modelled GDP path of the baseline run against the quarantine
  ## run. GDP is the productive share of the population: mildly infected
  ## (i, qi), hospitalised (h) and deceased (f) workers do not produce and
  ## quarantined susceptibles (q) produce at 50% (see app references).
  ## 'pop':       total population used to convert compartment counts to shares.
  ## 'x_limit':   last simulation day included.
  ## 'sim_data1': baseline (no quarantine) simulate() output.
  ## 'sim_data2': quarantine simulate() output.
  ## Returns a ggplot object.
  population <- pop
  # Daily GDP share for one simulation run; both runs share the same time
  # grid, so the two series align row-for-row.
  gdp_series <- function(sim_data) {
    df <- sim_data %>%
      select(time, i.num, h.num, f.num, q.num, qi.num) %>%
      filter(time <= x_limit)
    1 - (df$qi.num + df$i.num + df$h.num + df$f.num + 0.5 * df$q.num)/population
  }
  days <- sim_data1$time[sim_data1$time <= x_limit]
  economy <- data.frame(time = days,
                        gdp_base = gdp_series(sim_data1),
                        gdp_sim = gdp_series(sim_data2))
  ## Visualizing gdp effect (the original hard-coded a 360-day filter here;
  ## the caller-supplied x_limit is honoured instead):
  economy_plot_df <- economy %>%
    pivot_longer(-c(time), names_to = "Groups",
                 values_to = "count")
  # define a standard set of colours to represent the two scenarios
  compcols <- c(gdp_base = "black", gdp_sim = "blue")
  complabels <- c(gdp_base = "Baseline", gdp_sim = "Quarantine")
  economy_plot_df %>% ggplot(aes(x = time, y = count, colour = Groups)) +
    geom_line(size = 1.25, alpha = 0.7) +
    scale_colour_manual(values = compcols,
                        labels = complabels) +
    labs(title = "GDP Impact: Simulation vs Non-Quarantine",
         x = "Days since beginning of epidemic", y = "GDP") +
    theme(legend.position = "bottom")
}
# Define UI for app that simulates and draws the curves ----
# Layout: headline plot + horizon input, action buttons, then three columns
# (medical parameters / quarantine & population parameters / secondary plots),
# followed by the SP backtest section and reference links.
ui <- shinyUI(fluidPage(
  ## Google Tag (remove this or substitute the html file with your own google tag)
  # NOTE(review): HTML() injects the literal text "google-analytics.html" as
  # inline script content; the file itself is not read. includeHTML() is
  # probably the intent -- confirm before relying on analytics.
  tags$head(tags$script(HTML("google-analytics.html"))),
  # App title ----
  titlePanel("Simulador: SEIR + Quarentena + Economia"),
  # Headline prevalence plot (rendered by output$plot1 in the server) and
  # the x-axis horizon shared by plots 1-3.
  plotOutput(outputId = "plot1"),
  fluidRow(column(4, numericInput(inputId = "input_xaxis1",
                                  label = "Input max days in X axis",
                                  min = 1,
                                  max = 720,
                                  value = 360))),
  # "go" re-runs both simulations; "reset" restores the default sliders.
  actionButton("go", "Calculate", style="color: white; background-color: blue"),
  actionButton("reset", "Reset parameters"),
  # Sidebar panel for inputs ----
  fluidRow(
    # Column 1: R0 readout plus the medical parameters of the SEIR model.
    column(4,
           h4(textOutput(outputId = "r0"), style="color: blue"),
           h4(textOutput(outputId = "caption1")),
           sliderInput(inputId = "input_lambda",
                       label = "Daily encounters:",
                       min = 1,
                       max = 50,
                       value = 14),
           sliderInput(inputId = "input_rho",
                       label = "Infection probability (%), given encounter:",
                       min = 0.1,
                       max = 5,
                       step = 0.1,
                       value = 1.2,
                       round = T),
           sliderInput(inputId = "input_prog",
                       label = "Number of days: Exposed to Infected",
                       min = 1,
                       max = 20,
                       step = 0.1,
                       value = 5.2,
                       round = T),
           sliderInput(inputId = "input_rec",
                       label = "Number of days: Infected to Recovered",
                       min = 1,
                       max = 20,
                       step = 0.1,
                       value = 14,
                       round = T),
           sliderInput(inputId = "input_hosp",
                       label = "Probability of hospitalization, given Infection (%)",
                       min = 0.1,
                       max = 10,
                       step = 0.1,
                       value = 1,
                       round = T),
           sliderInput(inputId = "input_hosprec",
                       label = "Number of days: Hospitalization to Recovered",
                       min = 1,
                       max = 25,
                       step = 1,
                       value = 7,
                       round = T),
           sliderInput(inputId = "input_fat",
                       label = "Probability of Fatality, given Hospitalization (%)",
                       min = 0.1,
                       max = 20,
                       step = 0.1,
                       value = 2,
                       round = T)
    ),
    # Column 2: quarantine-scenario R0, quarantine parameters and the
    # population / initial-condition inputs.
    column(4,
           h4(textOutput(outputId = "r0_quar"), style="color: blue"),
           h4(textOutput(outputId = "caption2")),
           sliderInput(inputId = "input_lambda_q",
                       label = "Daily encounters (in quarantine):",
                       min = 1,
                       max = 50,
                       value = 7),
           sliderInput(inputId = "input_quar",
                       label = "Daily quarantine rate (%)",
                       min = 0,
                       max = 10,
                       step = .5,
                       value = 3,
                       round = T),
           sliderInput(inputId = "input_quar.i",
                       label = "Infected to Quarantine rate (%)",
                       min = 0,
                       max = 100,
                       step = 1,
                       value = 100,
                       round = T),
           sliderInput(inputId = "input_maxquar",
                       label = "Maximum quarantined population (%)",
                       min = 0,
                       max = 100,
                       step = 1,
                       value = 50,
                       round = T),
           h4(textOutput(outputId = "caption3")),
           numericInput(inputId = "input_pop",
                        label = "Population",
                        value = 44000000),
           numericInput(inputId = "input_popquar",
                        label = "Initial population Quarantined",
                        value = 0),
           numericInput(inputId = "input_popinfa",
                        label = "Initial population Exposed",
                        value = 5000),
           numericInput(inputId = "input_popinf",
                        label = "Initial population Infected",
                        value = 2500),
           numericInput(inputId = "input_icu",
                        label = "Number of ICU beds",
                        value = 100000)
    ),
    # Column 3: ICU-capacity plot and GDP-impact plot.
    column(4,
           plotOutput(outputId = "plot2"),
           plotOutput(outputId = "plot3")),
    textOutput(outputId = "economy_desc")
  ),
  # Backtest section: projected vs realised deaths for SP state.
  h3(textOutput(outputId = "caption4")),
  plotOutput(outputId = "plot4"),
  fluidRow(column(4, numericInput(inputId = "input_cases",
                                  label = "Escolha numero de casos iniciais",
                                  min = 1,
                                  max = 10000000,
                                  value = 1)),
           column(8,
                  numericInput(inputId = "input_xaxis",
                               label = "Ajuste numero maximo de dias no Eixo X",
                               min = 1,
                               max = 720,
                               value = 120))
  ),
  # Data-source note and reference links (rendered as tagLists by the server).
  textOutput(outputId = "caption5"),
  uiOutput("tab0"),
  h3(textOutput(outputId = "caption6")),
  uiOutput("tab1"),
  uiOutput("tab2")
))
server <- function(input, output, session) {
  # Shiny server: wires the UI inputs to the SEIR simulator (simulate()) and
  # the plotting helpers. Both simulations are (re)computed only when the
  # "Calculate" button fires (eventReactive on input$go).
  # 0. Reset sliders to initial values after clicking RESET
  observeEvent(input$reset,{
    updateSliderInput(session,'input_lambda',value = 14)
    updateSliderInput(session,'input_rho',value = 1.2)
    updateSliderInput(session,'input_prog',value = 5.2)
    updateSliderInput(session,'input_rec',value = 14)
    updateSliderInput(session,'input_hosp',value = 1)
    updateSliderInput(session,'input_hosprec',value = 7)
    updateSliderInput(session,'input_fat',value = 2)
    updateSliderInput(session,'input_quar',value = 3)
    updateSliderInput(session,'input_quar.i',value = 100)
    updateSliderInput(session,'input_maxquar',value = 50)
    updateSliderInput(session,'input_lambda_q',value = 7)
  })
  # 1. Update data after clicking button
  # Quarantine scenario. ignoreNULL = FALSE makes the reactive run once at
  # startup, before the button is ever pressed. Percent-style inputs are
  # divided by 100; day-count inputs become daily rates via 1/x.
  sim_data_quar <- eventReactive(input$go, {
    simulate(
      initial = c(
        time = 1,
        s.num = input$input_pop - input$input_popquar - input$input_popinf - input$input_popinfa ,
        e.num = input$input_popinfa,
        i.num = input$input_popinf,
        q.num = input$input_popquar,
        qe.num = 0,
        qi.num = 0,
        h.num = 0,
        r.num = 0,
        f.num = 0),
      total_time = 720,
      ## Parameters
      lambda = input$input_lambda,
      rho_s = input$input_rho/100,
      rho_a = input$input_rho/100,
      prog.rate = 1/input$input_prog,
      rec.rate = 1/input$input_rec,
      hosp.rate = input$input_hosp/100,
      disch.rate = 1/input$input_hosprec,
      fat.rate.base = input$input_fat/100,
      quar.rate = input$input_quar/100,
      quar.rate.i = input$input_quar.i/100,
      max_quar = input$input_maxquar/100,
      lambda_q = input$input_lambda_q
    )
  }, ignoreNULL = FALSE)
  # Baseline scenario: same medical parameters but quarantine rates forced
  # to zero and nobody initially quarantined.
  sim_data_noquar <- eventReactive(input$go, {
    simulate(initial = c(
      time = 1,
      s.num = input$input_pop,
      e.num = input$input_popinfa,
      i.num = input$input_popinf,
      q.num = 0,
      qe.num = 0,
      qi.num = 0,
      h.num = 0,
      r.num = 0,
      f.num = 0),
      total_time = 720,
      ## Parameters
      lambda = input$input_lambda,
      rho_s = input$input_rho/100,
      rho_a = input$input_rho/100,
      prog.rate = 1/input$input_prog,
      rec.rate = 1/input$input_rec,
      hosp.rate = input$input_hosp/100,
      disch.rate = 1/input$input_hosprec,
      fat.rate.base = input$input_fat/100,
      quar.rate = 0,
      quar.rate.i = 0,
      max_quar = input$input_maxquar/100,
      lambda_q = input$input_lambda_q)
  }, ignoreNULL = FALSE)
  # 2. Outputs:
  # R0 readouts computed inline from the current inputs (same formula with
  # lambda vs lambda_q). NOTE(review): the derivation of this expression is
  # not shown in this file -- verify against the model references.
  output$r0 <- renderText({ paste("R0 = ",
                                  round( ((input$input_rho/100)*input$input_lambda/(1/input$input_prog))*
                                    (1+(1/input$input_prog)/(1/input$input_rec +
                                        input$input_hosp/100*(1-1/input$input_hosprec)*input$input_fat/100 )), 3)) })
  output$r0_quar <- renderText({ paste("Quarantine R0 = ",
                                       round( ((input$input_rho/100)*input$input_lambda_q/(1/input$input_prog))*
                                         (1+(1/input$input_prog)/(1/input$input_rec +
                                             input$input_hosp/100*(1-1/input$input_hosprec)*input$input_fat/100 )), 3)) })
  output$caption1 <- renderText({ "Medical Parameters" })
  output$caption2 <- renderText({ "Quarantine Parameters" })
  output$caption3 <- renderText({ "Population Parameters" })
  output$caption4 <- renderText({ "Comparativo fatalidades projetadas x realizadas para o estado de SP:" })
  # Plot wiring: plot1 = prevalence, plot2 = ICU capacity, plot3 = GDP impact,
  # plot4 = SP backtest (downloads external data on render).
  output$plot1 <- renderPlot({
    plot1(x_limit = input$input_xaxis1 ,
          sim_data = sim_data_quar())
  })
  output$plot2 <- renderPlot({
    plot2(x_limit = input$input_xaxis1,
          sim_data = sim_data_quar(), icu_beds = input$input_icu)
  })
  output$plot3 <- renderPlot({
    economy(x_limit = input$input_xaxis1,
            pop = input$input_pop,
            sim_data1 = sim_data_noquar(),
            sim_data2 = sim_data_quar())
  })
  output$plot4 <- renderPlot({
    plot_backtest(cases = input$input_cases , sim_data1 = sim_data_noquar(),
                  sim_data2 = sim_data_quar(), input$input_xaxis)
  })
  output$caption5 <- renderText({ "Dados de SP são da Secretaria de Estado da Saúde de São Paulo (SES)" })
  output$caption6 <- renderText({ "References" })
  # Reference links rendered as tagLists below the backtest section.
  url1 <- a("Tim Churches Health Data Science Blog", href="https://timchurches.github.io/blog/posts/2020-03-18-modelling-the-effects-of-public-health-interventions-on-covid-19-transmission-part-2/")
  url2 <- a("An SEIR Infectious Disease Model with Testing and Conditional Quarantine", href = "https://www.nber.org/papers/w26901")
  url3 <- a("post", href="http://academicosdobar.site/posts/2020-06-03-simulador-covid-quarentena-economia/")
  output$tab0 <- renderUI({
    tagList("Veja comentários acerca da incerteza da previsão neste " , url3)
  })
  output$tab1 <- renderUI({
    tagList("SEIR model:" , url1)
  })
  output$tab2 <- renderUI({
    tagList("Quarantine and GDP calculation: Berger, Herkenhoff & Mongey (2020)" , url2)
  })
  output$economy_desc <- renderText({
    "Comments: A worker in quarantine is 50% less productive than a non-quarantine worker. An infected worker does not produce (see references)."
  })
}
shinyApp(ui, server) |
%% File Name: tam.threshold.Rd
%% File Version: 2.21
\name{tam.threshold}
\alias{tam.threshold}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Calculation of Thurstonian Thresholds
}
\description{
This function estimates Thurstonian thresholds for item category
parameters of (generalized) partial credit models (see Details).
}
\usage{
tam.threshold(tamobj, prob.lvl=0.5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{tamobj}{
Object of class \code{tam}
}
\item{prob.lvl}{
A numeric specifying the probability level of the threshold.
The default is \code{prob.lvl=0.5}.
}
}
\details{
This function only works appropriately for unidimensional models
or between item multidimensional models.
}
\value{
A data frame with Thurstonian thresholds. Rows correspond to items and
columns to item steps.
}
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
%\references{
%% ~put references to the literature/web site here ~
%}
%\author{
%% \pkg{TAM} authors
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See the \pkg{WrightMap} package and Example 3 for creating Wright maps
with fitted models in \pkg{TAM}, see
\code{\link[WrightMap:wrightMap]{wrightMap}}.
}
\examples{
#############################################################################
# EXAMPLE 1: ordered data - Partial credit model
#############################################################################
data( data.gpcm )
# Model 1: partial credit model
mod1 <- TAM::tam.mml( resp=data.gpcm,control=list( maxiter=200) )
summary(mod1)
## Item Parameters -A*Xsi
## item N M AXsi_.Cat1 AXsi_.Cat2 AXsi_.Cat3 B.Cat1.Dim1 B.Cat2.Dim1 B.Cat3.Dim1
## 1 Comfort 392 0.880 -1.302 1.154 3.881 1 2 3
## 2 Work 392 1.278 -1.706 -0.847 0.833 1 2 3
## 3 Benefit 392 1.163 -1.233 -0.404 1.806 1 2 3
# Calculation of Thurstonian thresholds
TAM::tam.threshold(mod1)
## Cat1 Cat2 Cat3
## Comfort -1.325226 2.0717468 3.139801
## Work -1.777679 0.6459045 1.971222
## Benefit -1.343536 0.7491760 2.403168
\dontrun{
#############################################################################
# EXAMPLE 2: Multidimensional model data.math
#############################################################################
library(sirt)
data(data.math, package="sirt")
dat <- data.math$data
# select items
items1 <- grep("M[A-D]", colnames(dat), value=TRUE)
items2 <- grep("M[H-I]", colnames(dat), value=TRUE)
# select dataset
dat <- dat[ c(items1,items2)]
# create Q-matrix
Q <- matrix( 0, nrow=ncol(dat), ncol=2 )
Q[ seq(1,length(items1) ), 1 ] <- 1
Q[ length(items1) + seq(1,length(items2) ), 2 ] <- 1
# fit two-dimensional model
mod1 <- TAM::tam.mml( dat, Q=Q )
# compute thresholds (specify a probability level of .625)
tmod1 <- TAM::tam.threshold( mod1, prob.lvl=.625 )
#############################################################################
# EXAMPLE 3: Creating Wright maps with the WrightMap package
#############################################################################
library(WrightMap)
# For conducting Wright maps in combination with TAM, see
# http://wrightmap.org/post/100850738072/using-wrightmap-with-the-tam-package
data(sim.rasch)
dat <- sim.rasch
# estimate Rasch model in TAM
mod1 <- TAM::tam.mml(dat)
summary(mod1)
#--- A: creating a Wright map with WLEs
# compute WLE
wlemod1 <- TAM::tam.wle(mod1)$theta
# extract thresholds
tmod1 <- TAM::tam.threshold(mod1)
# create Wright map
WrightMap::wrightMap( thetas=wlemod1, thresholds=tmod1, label.items.srt=-90)
#--- B: creating a Wright Map with population distribution
# extract ability distribution and replicate observations
uni.proficiency <- rep( mod1$theta[,1], round( mod1$pi.k * mod1$ic$n) )
# draw WrightMap
WrightMap::wrightMap( thetas=uni.proficiency, thresholds=tmod1, label.items.rows=3)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Thurstonian thresholds}
%% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/tam.threshold.Rd | no_license | markdly/TAM | R | false | false | 4,292 | rd | %% File Name: tam.threshold.Rd
%% File Version: 2.21
\name{tam.threshold}
\alias{tam.threshold}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Calculation of Thurstonian Thresholds
}
\description{
This function estimates Thurstonian thresholds for item category
parameters of (generalized) partial credit models (see Details).
}
\usage{
tam.threshold(tamobj, prob.lvl=0.5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{tamobj}{
Object of class \code{tam}
}
\item{prob.lvl}{
A numeric specifying the probability level of the threshold.
The default is \code{prob.lvl=0.5}.
}
}
\details{
This function only works appropriately for unidimensional models
or between item multidimensional models.
}
\value{
A data frame with Thurstonian thresholds. Rows correspond to items and
columns to item steps.
}
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
%\references{
%% ~put references to the literature/web site here ~
%}
%\author{
%% \pkg{TAM} authors
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See the \pkg{WrightMap} package and Example 3 for creating Wright maps
with fitted models in \pkg{TAM}, see
\code{\link[WrightMap:wrightMap]{wrightMap}}.
}
\examples{
#############################################################################
# EXAMPLE 1: ordered data - Partial credit model
#############################################################################
data( data.gpcm )
# Model 1: partial credit model
mod1 <- TAM::tam.mml( resp=data.gpcm,control=list( maxiter=200) )
summary(mod1)
## Item Parameters -A*Xsi
## item N M AXsi_.Cat1 AXsi_.Cat2 AXsi_.Cat3 B.Cat1.Dim1 B.Cat2.Dim1 B.Cat3.Dim1
## 1 Comfort 392 0.880 -1.302 1.154 3.881 1 2 3
## 2 Work 392 1.278 -1.706 -0.847 0.833 1 2 3
## 3 Benefit 392 1.163 -1.233 -0.404 1.806 1 2 3
# Calculation of Thurstonian thresholds
TAM::tam.threshold(mod1)
## Cat1 Cat2 Cat3
## Comfort -1.325226 2.0717468 3.139801
## Work -1.777679 0.6459045 1.971222
## Benefit -1.343536 0.7491760 2.403168
\dontrun{
#############################################################################
# EXAMPLE 2: Multidimensional model data.math
#############################################################################
library(sirt)
data(data.math, package="sirt")
dat <- data.math$data
# select items
items1 <- grep("M[A-D]", colnames(dat), value=TRUE)
items2 <- grep("M[H-I]", colnames(dat), value=TRUE)
# select dataset
dat <- dat[ c(items1,items2)]
# create Q-matrix
Q <- matrix( 0, nrow=ncol(dat), ncol=2 )
Q[ seq(1,length(items1) ), 1 ] <- 1
Q[ length(items1) + seq(1,length(items2) ), 2 ] <- 1
# fit two-dimensional model
mod1 <- TAM::tam.mml( dat, Q=Q )
# compute thresholds (specify a probability level of .625)
tmod1 <- TAM::tam.threshold( mod1, prob.lvl=.625 )
#############################################################################
# EXAMPLE 3: Creating Wright maps with the WrightMap package
#############################################################################
library(WrightMap)
# For conducting Wright maps in combination with TAM, see
# http://wrightmap.org/post/100850738072/using-wrightmap-with-the-tam-package
data(sim.rasch)
dat <- sim.rasch
# estimate Rasch model in TAM
mod1 <- TAM::tam.mml(dat)
summary(mod1)
#--- A: creating a Wright map with WLEs
# compute WLE
wlemod1 <- TAM::tam.wle(mod1)$theta
# extract thresholds
tmod1 <- TAM::tam.threshold(mod1)
# create Wright map
WrightMap::wrightMap( thetas=wlemod1, thresholds=tmod1, label.items.srt=-90)
#--- B: creating a Wright Map with population distribution
# extract ability distribution and replicate observations
uni.proficiency <- rep( mod1$theta[,1], round( mod1$pi.k * mod1$ic$n) )
# draw WrightMap
WrightMap::wrightMap( thetas=uni.proficiency, thresholds=tmod1, label.items.rows=3)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Thurstonian thresholds}
%% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
## Fits a random forest on training.csv and draws partial-dependence plots
## for the predictors ordered by importance, then the OOB error curve.
rm(list = ls())  # NOTE(review): clearing the global env in scripts is discouraged; kept for parity
library('randomForest')
# 'sep' passed by name for clarity: in the original the ',' reached sep only
# through positional matching (header was supplied by name).
data <- read.table('training.csv', sep = ',', header = TRUE)
#####
set.seed(47)  # reproducible forest
data <- na.omit(data)
# Predictor: column 28 (kept as a one-column data frame); response: column 27.
rf <- randomForest(data[28], data[, 27], proximity = TRUE, importance = TRUE)
#####
imp <- importance(rf)
# Predictor names ordered by decreasing value of the first importance measure.
impvar <- rownames(imp)[order(imp[, 1], decreasing = TRUE)]
op <- par(mfrow = c(2, 3))
for (i in seq_along(impvar)) {
  partialPlot(rf, data, impvar[i], xlab = impvar[i],
              main = paste("dependencia parcial en: ", impvar[i]),
              ylim = c(30, 70))
}
par(op)
plot(rf)
| /garbage/randomForest.r | permissive | j3nnn1/weekend_project | R | false | false | 491 | r |
## Random-forest training script (duplicate copy of the block above in this
## dump): fit on training.csv, then partial-dependence and OOB-error plots.
rm(list=ls())
library('randomForest')
# NOTE(review): the ',' is matched positionally to 'sep' (header is given by
# name), so the file is parsed as CSV; passing sep=',' by name would be clearer.
data = read.table('training.csv', ',',header=T);
#####
set.seed(47)
data = na.omit(data)
# Predictor: column 28 (one-column data frame); response: column 27.
rf = randomForest(data[28], data[,27], proximity=TRUE, importance=TRUE)
#####
imp <- importance(rf)
# Predictor names ordered by decreasing value of the first importance measure.
impvar <- rownames(imp)[order(imp[, 1], decreasing=TRUE)]
op <- par(mfrow=c(2, 3))
for (i in seq_along(impvar)) {
  partialPlot(rf, data, impvar[i], xlab=impvar[i],
              main=paste("dependencia parcial en: ", impvar[i]),
              ylim=c(30, 70))
}
par(op)
plot(rf)
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  ## 'pollutant' is a character vector of length 1 indicating
  ## the name of the pollutant for which we will calculate the
  ## mean; either "sulfate" or "nitrate".
  ## 'id' is an integer vector indicating the monitor ID numbers
  ## to be used; each ID indexes into the alphabetically sorted
  ## file listing of 'directory'.
  ## Return the mean of the pollutant across all monitors listed
  ## in the 'id' vector (ignoring NA values)
  files_list <- list.files(directory, full.names = TRUE)
  # Read each requested file once and bind the pieces in a single call;
  # growing a data frame with rbind() inside a loop copies it every pass.
  dat <- do.call(rbind, lapply(id, function(i) read.csv(files_list[i])))
  mean(dat[, pollutant], na.rm = TRUE)
}
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'pollutant' is a character vector of length 1 indicating
## the name of the pollutant for which we will calculate the
## mean; either "sulfate" or "nitrate".
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
## Return the mean of the pollutant across all monitors list
## in the 'id' vector (ignoring NA values)
files_list <- list.files(directory, full.names = TRUE)
dat <- data.frame()
for(i in id){
dat <- rbind(dat, read.csv(files_list[i]))
}
f_mean <- mean(dat[,pollutant], na.rm = TRUE)
f_mean
} |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/descriptive_functions.R
\name{print.descriptive.jonas}
\alias{print.descriptive.jonas}
\title{Print descriptive jonas}
\usage{
\method{print}{descriptive.jonas}(x)
}
\description{
Print descriptive jonas
}
| /man/print.descriptive.jonas.Rd | no_license | etb/MONECA | R | false | false | 293 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/descriptive_functions.R
\name{print.descriptive.jonas}
\alias{print.descriptive.jonas}
\title{Print descriptive jonas}
\usage{
\method{print}{descriptive.jonas}(x)
}
\description{
Print descriptive jonas
}
|
## archivist package for R
##
#' @title Show Artifact's Session Info
#'
#' @description
#' \code{asession} extracts artifact's session info. This allows to check in what conditions
#' the artifact was created.
#'
#' @param md5hash One of the following (see \link{aread}):
#'
#' A character vector which elements are consisting of at least three components separated with '/': Remote user name, Remote repository and name of the artifact (MD5 hash) or it's abbreviation.
#'
#' MD5 hashes of artifacts in current local default directory or its abbreviations.
#'
#' @return An object of the class \code{session_info}.
#'
#' @author
#' Przemyslaw Biecek, \email{przemyslaw.biecek@@gmail.com}
#'
#' @template roxlate-references
#' @template roxlate-contact
#'
#' @examples
#' \dontrun{
#' setLocalRepo(system.file("graphGallery", package = "archivist"))
#' asession("2a6e492cb6982f230e48cf46023e2e4f")
#'
#' # no session info
#' asession("pbiecek/graphGallery/2a6e492cb6982f230e48cf46023e2e4f")
#' # nice session info
#' asession("pbiecek/graphGallery/7f3453331910e3f321ef97d87adb5bad")
#' }
#' @family archivist
#' @rdname asession
#' @export
asession <- function( md5hash = NULL) {
  ## Extracts the archived session_info for an artifact identified either by
  ## a repository URL, a bare local md5hash, or "user/repo[/subdir]/md5hash".
  ## Warns and returns NA when no session info was archived for the artifact.
  elements <- strsplit(md5hash, "/")[[1]]
  # scalar condition: || short-circuits, unlike the vectorised | used before
  stopifnot( length(elements) >= 3 || length(elements) == 1)
  # Shared tail of all three branches: pick out the "session_info:<md5>" tag
  # and strip the prefix; NA_character_ signals "nothing archived".
  session_md5 <- function(tags) {
    tagged <- grep(tags, pattern = "^session_info:", value = TRUE)
    if (length(tagged) == 0) return(NA_character_)
    gsub(tagged[1], pattern = "^session_info:", replacement = "")
  }
  # Common warn-and-return path when no session info was found.
  no_session <- function() {
    warning(paste0("No session info archived for ", md5hash))
    NA
  }
  if (is.url(md5hash)) {
    tags <- getTagsRemoteByUrl(tail(elements, 1),
                               paste(elements[-length(elements)], collapse = "/"),
                               tag = "")
    md5 <- session_md5(tags)
    if (is.na(md5)) return(no_session())
    return(loadFromLocalRepo(md5,
                             repoDir = paste(elements[-length(elements)], collapse = "/"),
                             value = TRUE))
  }
  if (length(elements) == 1) {
    # local default repository
    tags <- getTagsLocal(md5hash, tag = "")
    md5 <- session_md5(tags)
    if (is.na(md5)) return(no_session())
    return(loadFromLocalRepo(md5, value = TRUE))
  }
  # Remote repository addressed as user/repo[/subdir]/md5hash
  tags <- getTagsRemote(tail(elements, 1), repo = elements[2],
                        subdir = ifelse(length(elements) > 3, paste(elements[3:(length(elements) - 1)], collapse = "/"), "/"),
                        user = elements[1], tag = "")
  md5 <- session_md5(tags)
  if (is.na(md5)) return(no_session())
  loadFromRemoteRepo(md5,
                     repo = elements[2],
                     subdir = ifelse(length(elements) > 3, paste(elements[3:(length(elements) - 1)], collapse = "/"), "/"),
                     user = elements[1],
                     value = TRUE)
}
| /R/asession.R | no_license | pbiecek/archivist | R | false | false | 3,186 | r | ## archivist package for R
##
#' @title Show Artifact's Session Info
#'
#' @description
#' \code{asession} extracts artifact's session info. This allows to check in what conditions
#' the artifact was created.
#'
#' @param md5hash One of the following (see \link{aread}):
#'
#' A character vector which elements are consisting of at least three components separated with '/': Remote user name, Remote repository and name of the artifact (MD5 hash) or it's abbreviation.
#'
#' MD5 hashes of artifacts in current local default directory or its abbreviations.
#'
#' @return An object of the class \code{session_info}.
#'
#' @author
#' Przemyslaw Biecek, \email{przemyslaw.biecek@@gmail.com}
#'
#' @template roxlate-references
#' @template roxlate-contact
#'
#' @examples
#' \dontrun{
#' setLocalRepo(system.file("graphGallery", package = "archivist"))
#' asession("2a6e492cb6982f230e48cf46023e2e4f")
#'
#' # no session info
#' asession("pbiecek/graphGallery/2a6e492cb6982f230e48cf46023e2e4f")
#' # nice session info
#' asession("pbiecek/graphGallery/7f3453331910e3f321ef97d87adb5bad")
#' }
#' @family archivist
#' @rdname asession
#' @export
asession <- function( md5hash = NULL) {
  # Split "user/repo[/subdir]/md5hash", a repository URL ending in a hash,
  # or a bare md5hash into its components.
  elements <- strsplit(md5hash, "/")[[1]]
  # Valid forms have either one element (bare hash) or at least three
  # (user/repo/.../hash).  `||` (scalar) instead of the original vectorized `|`.
  stopifnot(length(elements) >= 3 || length(elements) == 1)

  # Extract the md5hash of the archived session info from a tag vector;
  # returns NULL when no "session_info:" tag was archived.
  session_info_hash <- function(tags) {
    hits <- grep(tags, pattern = "^session_info:", value = TRUE)
    if (length(hits) == 0) return(NULL)
    gsub(hits[1], pattern = "^session_info:", replacement = "")
  }
  # Common "nothing archived" exit: warn and return NA.
  no_session_info <- function() {
    warning(paste0("No session info archived for ", md5hash))
    NA
  }

  if (is.url(md5hash)) {
    # URL form: everything but the last element is the repository address.
    repo_url <- paste(elements[-length(elements)], collapse = "/")
    hash <- session_info_hash(getTagsRemoteByUrl(tail(elements, 1), repo_url, tag = ""))
    if (is.null(hash)) return(no_session_info())
    return(loadFromLocalRepo(hash, repoDir = repo_url, value = TRUE))
  }

  if (length(elements) == 1) {
    # Bare md5hash: look it up in the default local repository.
    hash <- session_info_hash(getTagsLocal(md5hash, tag = ""))
    if (is.null(hash)) return(no_session_info())
    return(loadFromLocalRepo(hash, value = TRUE))
  }

  # "user/repo[/subdir]/md5hash": query the remote repository.
  # Scalar if/else replaces the original ifelse() on a length-1 condition.
  subdir <- if (length(elements) > 3) paste(elements[3:(length(elements) - 1)], collapse = "/") else "/"
  hash <- session_info_hash(getTagsRemote(tail(elements, 1), repo = elements[2],
                                          subdir = subdir, user = elements[1], tag = ""))
  if (is.null(hash)) return(no_session_info())
  loadFromRemoteRepo(hash, repo = elements[2], subdir = subdir,
                     user = elements[1], value = TRUE)
}
|
## Plot 3: energy sub-metering (1-3) over 2007-02-01 .. 2007-02-02 from the
## UCI "Individual household electric power consumption" data set.
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", col.names=c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), colClasses= "character")
# Parse dates and keep only the two analysis days.
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
data <- subset(data, Date >= as.Date("01/02/2007", format="%d/%m/%Y") &
                     Date <= as.Date("02/02/2007", format="%d/%m/%Y"))
# Combine date and time into a single timestamp for the x-axis.
data$Time <- paste(data$Date, data$Time)
data$Time <- strptime(data$Time, format = "%Y-%m-%d %H:%M:%S")
# Drop rows carrying the dataset's "?" missing-value marker, then convert
# each measurement column to numeric.
for (col in c("Global_active_power", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) {
  data <- data[data[[col]] != "?", ]
  data[[col]] <- as.numeric(data[[col]])
}
# Empty frame, then one line per sub-meter.
with(data, plot(Time, Sub_metering_1, ylab = "Energy sub metering", type = "n"))
with(data, points(Time, Sub_metering_1, type="l", col="black"))
with(data, points(Time, Sub_metering_2, type="l", col="red"))
with(data, points(Time, Sub_metering_3, type="l", col="blue"))
# FIX: the legend previously used pch = 1 (point symbols) although the series
# are drawn as lines; line keys (lty = 1) match the plot.
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.copy(png, file = "plot3.png")
dev.off() | /plot3.R | no_license | blockee/ExData_Plotting1 | R | false | false | 1,411 | r | data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", col.names=c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), colClasses= "character")
# (continuation of plot3.R: `data` was read above with all columns as character)
# Parse dates and keep only 2007-02-01 and 2007-02-02.
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
data <- subset(data, Date >= as.Date("01/02/2007", format="%d/%m/%Y"))
data <- subset(data, Date <= as.Date("02/02/2007", format="%d/%m/%Y"))
# Combine date and time into a single timestamp for the x-axis.
data$Time <- paste(data$Date, data$Time)
data$Time <- strptime(data$Time, format = "%Y-%m-%d %H:%M:%S")
# Drop rows carrying the dataset's "?" missing-value marker...
data <- subset(data, Global_active_power != "?")
data <- subset(data, Sub_metering_1 != "?")
data <- subset(data, Sub_metering_2 != "?")
data <- subset(data, Sub_metering_3 != "?")
# ...then convert the measurement columns to numeric.
data$Global_active_power <- as.numeric(data$Global_active_power)
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)
# Empty frame, then one line per sub-meter.
with(data, plot(Time, Sub_metering_1, ylab = "Energy sub metering", type = "n"))
with(data, points(Time, Sub_metering_1, type="l", col="black"))
with(data, points(Time, Sub_metering_2, type="l", col="red"))
with(data, points(Time, Sub_metering_3, type="l", col="blue"))
# NOTE(review): pch = 1 puts point symbols in the legend although the series
# are drawn as lines; lty = 1 would match the plot.
legend("topright", pch = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Copy the screen device to a PNG file and close it.
dev.copy(png, file = "plot3.png")
dev.off()
# Compute the pauses (time differences) between consecutive keystrokes
# recorded in rui.txt.
setwd("~/") #set working directory
# fill = TRUE pads short rows so ragged logs still parse.
importedfilename <- read.table("rui.txt", header=TRUE, fill=TRUE)
# Successive differences of the Time column = pause before each keystroke.
# NOTE(review): assumes Time is numeric-coercible (e.g. epoch seconds) -- confirm.
pausetimes <- diff(as.numeric(importedfilename$Time))
pausetimeframe <- data.frame(diff = (pausetimes))
| /ruidifferences.R | no_license | Cathy-L/Keystroke-R-codes | R | false | false | 209 | r | setwd("~/") #set working directory
importedfilename <- read.table("rui.txt", header=TRUE, fill=TRUE)  # fill=TRUE pads ragged rows
# Pause before each keystroke = difference of consecutive Time stamps.
# NOTE(review): assumes Time is numeric-coercible (e.g. epoch seconds) -- confirm.
pausetimes <- diff(as.numeric(importedfilename$Time))
pausetimeframe <- data.frame(diff = (pausetimes))
|
### This file contains global settings for forecasting models and frequently used user-defined functions
### for forecasting evaluation.
### 0. Loading data
load("Panel.RData")   # provides the Panel data frame (and, presumably, states48 -- TODO confirm)
### 1. Global settings---------------------
AGGREGATE = "A8"            # id of the national aggregate series in Panel
STATES = states48           # set of state ids to forecast
VARNAME = "Total"           # base variable to forecast
# Derived column names: l = log level, dl = log difference, L/L2/L3 = lags.
lVARNAME = paste0("l" , VARNAME)
dlVARNAME = paste0("dl", VARNAME)
LdlVARNAME = paste0("Ldl", VARNAME)
L2dlVARNAME = paste0("L2dl", VARNAME)
L3dlVARNAME = paste0("L3dl", VARNAME)
LlVARNAME = paste0("Ll", VARNAME)
LINAME = c("Ldlunemply_S")  # leading indicator: lagged dlog unemployment (seas. adj.)
# Sample split, in panel-index units (calendar labels per original comments).
train.START = 1 # 2000:12
train.END = 75 # 2006:12
test.START = 76 # 2007:1
max.INDEX = max(Panel$index)  # last available index in the panel
### 2. Forecasting leading indicator.----------------------
# Uncomment this piece of code when reconstruction of the leading indicator is needed.
#
# data.unemply_U = subset(Panel, State %in% c(AGGREGATE, STATES), select = c("index", "State", "Ldlunemply_U", "dlunemply_U"))
# data.unemply_S = subset(Panel, State %in% c(AGGREGATE, STATES), select = c("index", "State", "Ldlunemply_S", "dlunemply_S"))
#
# data.unemply_U[,c("H1dlunemply_U", "H2dlunemply_U","H3dlunemply_U", "H4dlunemply_U","H5dlunemply_U")] = NA
# data.unemply_S[,c("H1dlunemply_S", "H2dlunemply_S","H3dlunemply_S", "H4dlunemply_S","H5dlunemply_S")] = NA
#
#
# for(i in c(AGGREGATE, STATES)){
# for(j in (train.END - 20):max.INDEX){
# data.unemply_U[data.unemply_U$State == i & data.unemply_U$index == j, c("H1dlunemply_U", "H2dlunemply_U","H3dlunemply_U", "H4dlunemply_U","H5dlunemply_U")] =
# forecast(auto.arima(ts(subset(data.unemply_U, State == i & index<= j, "dlunemply_U"), f = 12), seasonal = TRUE, d= 0 , D= 0), 5)$mean
# print(paste(i,j))
# }
# }
#
# for(i in c(AGGREGATE, STATES)){
# for(j in (train.END - 20):max.INDEX){
# data.unemply_S[data.unemply_S$State == i & data.unemply_S$index == j, c("H1dlunemply_S", "H2dlunemply_S","H3dlunemply_S", "H4dlunemply_S","H5dlunemply_S")] =
# forecast(auto.arima(ts(subset(data.unemply_S, State == i & index<= j, "dlunemply_S"), f = 12), seasonal = FALSE, d= 0 , D= 0), 5)$mean
# print(paste(i,j))
# }
# }
# save(data.unemply_U, data.unemply_S, file = "fcast.unemply.RData")
# Load the precomputed unemployment forecasts (data.unemply_U / data.unemply_S)
# built by the commented-out block above.
load("fcast.unemply.RData")
### 3. Functions---------------
## Functions to compute forecast accuracy.
fcastAcc.AG = function(Test.data = Test.AG, start = min(Test.data$index), end = max(Test.data$index), panel = FALSE){
  ## Compute forecast-accuracy measures for the national level forecasts.
  ## Input:
  #    Test.data: data frame with the true series (true) and forecasts
  #               (fcasth?, fcasth?a, ? = 1,3,6); defaults to Test.AG.
  #    start/end: index range over which the measures are computed.
  #    panel    : if TRUE only the aggregated (bottom-up) forecasts are scored.
  ## Output: a matrix of accuracy measures, one row per forecast series,
  ##         with the evaluated index span stored in attr "span".
  Test.data = subset(Test.data, index >= start & index <= end)
  if (panel) {
    fcast.cols = c("fcasth1a", "fcasth3a", "fcasth6a")
    row.labels = c("Aggh1", "Aggh3", "Aggh6")
  } else {
    # Direct forecasts interleaved with their aggregated counterparts.
    fcast.cols = c("fcasth1", "fcasth1a", "fcasth3", "fcasth3a", "fcasth6", "fcasth6a")
    row.labels = c("Directh1", "Aggh1", "Directh3", "Aggh3", "Directh6", "Aggh6")
  }
  # One accuracy() row per forecast column, stacked into a matrix.
  measure.AG = do.call(rbind, lapply(fcast.cols, function(cl) accuracy(Test.data[[cl]], Test.data$true)))
  rownames(measure.AG) = row.labels
  attr(measure.AG, "span") = paste(start, "-", end)
  measure.AG
}
fcastAcc.state = function(Test.data = Test.state, start = min(Test.data$index), end = max(Test.data$index)){
  ## Compute per-state forecast-accuracy measures (RMSE/MAPE) for horizons 1, 3 and 6.
  ## Input:
  #    Test.data: data frame with true values (true) and forecasts fcasth1/3/6,
  #               one row per state per index; defaults to Test.state.
  #    start/end: index range over which the measures are computed.
  ## Output: a data.frame with one row per state and columns
  ##         RMSEh1, MAPEh1, RMSEh3, MAPEh3, RMSEh6, MAPEh6;
  ##         the evaluated index span is stored in attr "span".
  Test.data = subset(Test.data, index >= start & index <= end)
  STATES = levels(factor(Test.data$State))
  # One State/RMSE/MAPE table per horizon, computed state by state.
  per.horizon = lapply(c(1, 3, 6), function(h) {
    fcast.col = paste0("fcasth", h)
    m = data.frame(State = STATES, RMSE = NA, MAPE = NA)
    for (s in STATES) {
      state.rows = Test.data[Test.data$State == s, ]
      m[m$State == s, c("RMSE", "MAPE")] =
        accuracy(state.rows[[fcast.col]], state.rows$true)[, c("RMSE", "MAPE")]
    }
    m
  })
  # Bind horizons side by side, keeping State from the first table only.
  measure.state = cbind(per.horizon[[1]], per.horizon[[2]][-1], per.horizon[[3]][-1])
  names(measure.state)[-1] = c("RMSEh1", "MAPEh1", "RMSEh3", "MAPEh3", "RMSEh6", "MAPEh6")
  attr(measure.state, "span") = paste(start, "-", end)
  measure.state
}
fcastAcc.sum = function(Test.data = Test.state, start = min(Test.data$index), end = max(Test.data$index)){
  ## Compute OVERALL forecast-accuracy measures across all states for horizons 1, 3 and 6.
  ## Input:
  #    Test.data: data frame with true values (true) and forecasts fcasth1/3/6;
  #               defaults to Test.state.
  #    start/end: index range over which the measures are computed.
  ## Output: a matrix with rows Overallh1/Overallh3/Overallh6 and columns
  ##         RMSE.All, MAPE.All plus median/max/min of the per-state measures;
  ##         the evaluated index span is stored in attr "span".
  ## FIX: the horizon-1 overall RMSE/MAPE previously omitted na.rm = TRUE
  ##      (unlike h3/h6), so any missing forecast turned them into NA; all
  ##      horizons now drop NAs consistently.
  Test.data = subset(Test.data, index >= start & index <= end)
  measure.state = fcastAcc.state(Test.data, start, end)
  # Summary row for a single horizon h.
  one.horizon = function(h) {
    err  = Test.data$true - Test.data[[paste0("fcasth", h)]]
    rmse = measure.state[[paste0("RMSEh", h)]]
    mape = measure.state[[paste0("MAPEh", h)]]
    c(RMSE.All = sqrt(mean(err^2, na.rm = TRUE)),                          # RMSE, all states pooled
      MAPE.All = mean(abs(err) / abs(Test.data$true), na.rm = TRUE) * 100, # MAPE, all states pooled
      RMSE.Md  = median(rmse), RMSE.Max = max(rmse), RMSE.Min = min(rmse),
      MAPE.Md  = median(mape), MAPE.Max = max(mape), MAPE.Min = min(mape))
  }
  measure.sum = rbind(one.horizon(1), one.horizon(3), one.horizon(6))
  rownames(measure.sum) = c("Overallh1", "Overallh3", "Overallh6")
  attr(measure.sum, "span") = paste(start, "-", end)
  measure.sum
}
# Function for plotting national level forecasts
# Plot the national-level true series against its direct and aggregated
# forecasts for one horizon.
#   data   : data frame with columns index, true, fcasth<h>, fcasth<h>a
#            (the Test.AG object from the forecasting step).
#   h      : forecast horizon (1, 3 or 6).
#   model  : model label for the plot title (defaults to the global model.name).
#   colors, shape, Lshape : colour, point symbol and line type for the
#            true / direct / aggregated series respectively.
#   panel  : if TRUE only the aggregated (bottom-up) forecast is drawn.
# Side effect: draws on the current graphics device; no return value.
# NOTE(review): the body indexes Panel with the global `Aggregate`, while the
# settings section defines `AGGREGATE` -- presumably the same object; confirm
# where `Aggregate` is defined.
plot.AG = function(data, h, model = model.name, colors = c("gray40", "blue", "red"),
                   shape = c(21,20, 18), Lshape = c("dotted","dotted", "dotted"), panel = FALSE){
  ## Data must contain index, true, fcast1,3,6.
  ## The input must be the data frame Test.AG
  #data = Test.AG; h =1; model.name = model.name; colors = c("gray40", "blue", "red");
  #shape = c(21,20, 19); Lshape = c("dotted","dotted", "dotted")
  if(!panel){
  # Direct + aggregated forecasts: drop rows where any series is missing.
  data = na.omit(data[,c("index", "true", paste0("fcasth", h), paste0("fcasth", h, "a"))])
  # index may arrive as a factor; recover its numeric value.
  if(is.factor(data$index)) start.index = min(as.numeric(levels(data$index)[data$index])) else
    if(is.numeric(data$index)) start.index = min(data$index)
  # Calendar date of the first plotted index, looked up in the Panel.
  start.Year = Panel[Panel$State == Aggregate & Panel$index == start.index, "Cal.Year"]
  start.Month = Panel[Panel$State == Aggregate & Panel$index == start.index, "Month"]
  true = ts(data[, "true"], start = c(start.Year, start.Month), f = 12)
  fcast = ts(data[, paste0("fcasth", h)], start = c(start.Year, start.Month), f = 12)
  fcasta = ts(data[, paste0("fcasth", h, "a")], start = c(start.Year, start.Month), f = 12)
  par(mar = c(4,4,3,1)+ 0.1)
  plot( true, type = "b", lty = Lshape[1], col = colors[1], pch = shape[1], xlab = "", ylab = "",
        main = paste("Forecasting National Level Applications:", model, "horizon =", h)
  )
  lines(fcast, type = "b", lty = Lshape[2], col = colors[2], pch = shape[2])
  lines(fcasta, type = "b", lty = Lshape[3], col = colors[3], pch = shape[3])
  #abline(h = 0, , lty = "dotted", col = "grey")
  legend("topleft", c("True", "Direct", "Aggregated"), lty = Lshape, col = colors, pch = shape)
  }
  ####
  if(panel){
    # Aggregated forecast only (no direct national forecast in the panel case).
    data = na.omit(data[,c("index", "true", paste0("fcasth", h, "a"))])
    if(is.factor(data$index)) start.index = min(as.numeric(levels(data$index)[data$index])) else
      if(is.numeric(data$index)) start.index = min(data$index)
    start.Year = Panel[Panel$State == Aggregate & Panel$index == start.index, "Cal.Year"]
    start.Month = Panel[Panel$State == Aggregate & Panel$index == start.index, "Month"]
    true = ts(data[, "true"], start = c(start.Year, start.Month), f = 12)
    fcasta = ts(data[, paste0("fcasth", h, "a")], start = c(start.Year, start.Month), f = 12)
    par(mar = c(4,4,3,1)+ 0.1)
    plot( true, type = "b", lty = Lshape[1], col = colors[1], pch = shape[1], xlab = "", ylab = "",
          main = paste("Forecasting National Level Applications:", model, "horizon =", h)
    )
    lines(fcasta, type = "b", lty = Lshape[3], col = colors[3], pch = shape[3])
    #abline(h = 0, , lty = "dotted", col = "grey")
    legend("topleft", c("True", "Aggregated"), lty = Lshape[c(1,3)], col = colors[c(1,3)], pch = shape[c(1,3)])
  }
}
# Function for plotting state level forecasts
# Plot one state's true series against its forecast for one horizon.
#   data  : rows of Test.state for a single state (columns index, true, fcasth<h>).
#   h     : forecast horizon (1, 3 or 6).
#   state : state label for the plot title.
#   model, colors, shape, Lshape : as in plot.AG.
# Side effect: draws on the current graphics device; no return value.
# NOTE(review): uses the global `Aggregate` to date the series (same caveat as
# plot.AG: the settings define `AGGREGATE` -- confirm).
plot.state = function(data, h, state, model = model.name, colors = c("gray40", "blue", "red"),
                      shape = c(21,20, 18), Lshape = c("dotted","dotted", "dotted")){
  ## Data must contain index, true, fcast1,3,6.
  ## The input must be the data frame Test.state[Test.state == i, ]
  data = na.omit(data[,c("index", "true", paste0("fcasth", h))])
  # index may arrive as a factor; recover its numeric value.
  if(is.factor(data$index)) start.index = min(as.numeric(levels(data$index)[data$index])) else
    if(is.numeric(data$index)) start.index = min(data$index)
  # Calendar date of the first plotted index, looked up in the Panel.
  start.Year = Panel[Panel$State == Aggregate & Panel$index == start.index, "Cal.Year"]
  start.Month = Panel[Panel$State == Aggregate & Panel$index == start.index, "Month"]
  true = ts(data[, "true"], start = c(start.Year, start.Month), f = 12)
  fcast = ts(data[, paste0("fcasth", h)], start = c(start.Year, start.Month), f = 12)
  par(mar = c(4,4,3,1)+ 0.1)
  plot( true, type = "b", lty = Lshape[1], col = colors[1], pch = shape[1], xlab = "", ylab = "",
        main = paste("Forecasting State Level Applications:", model, "horizon =", h, state))
  lines(fcast, type = "b", lty = Lshape[2], col = colors[2], pch = shape[2])
  #abline(h = 0, , lty = "dotted", col = "grey")
  legend("topleft", c("True", "Forecast"), lty = Lshape, col = colors, pch = shape)
}
## Defining conversion functions between index and date
# Convert a panel index i to its calendar (year, period) pair.
#   i.min, i.max : index range of the panel (i.max defaults to the global max.INDEX).
#   start, f     : calendar start and frequency of the index series.
# Returns c(year, period).
# FIX: the body previously hard-coded max.INDEX, c(2000,10) and f = 12,
# silently ignoring the i.max, start and f arguments.
i2d = function(i, i.min = 1, i.max = max.INDEX, start = c(2000,10), f = 12){
  t = ts(i.min:i.max, start = start, f = f)
  c(trunc(time(t))[t == i], cycle(t)[t == i])
}
# Convert a calendar (year y, period m) to its panel index; inverse of i2d.
# Arguments as in i2d.
# FIX: the body previously hard-coded max.INDEX, c(2000,10) and f = 12,
# silently ignoring the i.max, start and f arguments.
d2i = function(y,m, i.min = 1, i.max = max.INDEX, start = c(2000,10), f = 12){
  t = ts(i.min:i.max, start = start, f = f)
  as.numeric(window(t, start = c(y,m), end = c(y,m)))
}
| /Forecasting.main.R | no_license | yimengyin16/DI-seasonality | R | false | false | 13,191 | r | ### This file contains global settings for forecasting models and frequently used user-defined functions
### for forecasting evaluation.
### 0. Loading data
load("Panel.RData")   # provides the Panel data frame (and, presumably, states48 -- TODO confirm)
### 1. Global settings---------------------
AGGREGATE = "A8"            # id of the national aggregate series in Panel
STATES = states48           # set of state ids to forecast
VARNAME = "Total"           # base variable to forecast
# Derived column names: l = log level, dl = log difference, L/L2/L3 = lags.
lVARNAME = paste0("l" , VARNAME)
dlVARNAME = paste0("dl", VARNAME)
LdlVARNAME = paste0("Ldl", VARNAME)
L2dlVARNAME = paste0("L2dl", VARNAME)
L3dlVARNAME = paste0("L3dl", VARNAME)
LlVARNAME = paste0("Ll", VARNAME)
LINAME = c("Ldlunemply_S")  # leading indicator: lagged dlog unemployment (seas. adj.)
# Sample split, in panel-index units (calendar labels per original comments).
train.START = 1 # 2000:12
train.END = 75 # 2006:12
test.START = 76 # 2007:1
max.INDEX = max(Panel$index)  # last available index in the panel
### 2. Forecasting leading indicator.----------------------
# Uncomment this piece of code when reconstruction of the leading indicator is needed.
#
# data.unemply_U = subset(Panel, State %in% c(AGGREGATE, STATES), select = c("index", "State", "Ldlunemply_U", "dlunemply_U"))
# data.unemply_S = subset(Panel, State %in% c(AGGREGATE, STATES), select = c("index", "State", "Ldlunemply_S", "dlunemply_S"))
#
# data.unemply_U[,c("H1dlunemply_U", "H2dlunemply_U","H3dlunemply_U", "H4dlunemply_U","H5dlunemply_U")] = NA
# data.unemply_S[,c("H1dlunemply_S", "H2dlunemply_S","H3dlunemply_S", "H4dlunemply_S","H5dlunemply_S")] = NA
#
#
# for(i in c(AGGREGATE, STATES)){
# for(j in (train.END - 20):max.INDEX){
# data.unemply_U[data.unemply_U$State == i & data.unemply_U$index == j, c("H1dlunemply_U", "H2dlunemply_U","H3dlunemply_U", "H4dlunemply_U","H5dlunemply_U")] =
# forecast(auto.arima(ts(subset(data.unemply_U, State == i & index<= j, "dlunemply_U"), f = 12), seasonal = TRUE, d= 0 , D= 0), 5)$mean
# print(paste(i,j))
# }
# }
#
# for(i in c(AGGREGATE, STATES)){
# for(j in (train.END - 20):max.INDEX){
# data.unemply_S[data.unemply_S$State == i & data.unemply_S$index == j, c("H1dlunemply_S", "H2dlunemply_S","H3dlunemply_S", "H4dlunemply_S","H5dlunemply_S")] =
# forecast(auto.arima(ts(subset(data.unemply_S, State == i & index<= j, "dlunemply_S"), f = 12), seasonal = FALSE, d= 0 , D= 0), 5)$mean
# print(paste(i,j))
# }
# }
# save(data.unemply_U, data.unemply_S, file = "fcast.unemply.RData")
# Load the precomputed unemployment forecasts (data.unemply_U / data.unemply_S)
# built by the commented-out block above.
load("fcast.unemply.RData")
### 3. Functions---------------
## Functions to compute forecast accuracy.
fcastAcc.AG = function(Test.data = Test.AG, start = min(Test.data$index), end = max(Test.data$index), panel = FALSE){
  ## Compute forecast-accuracy measures for the national level forecasts.
  ## Input:
  #    Test.data: data frame with the true series (true) and forecasts
  #               (fcasth?, fcasth?a, ? = 1,3,6); defaults to Test.AG.
  #    start/end: index range over which the measures are computed.
  #    panel    : if TRUE only the aggregated (bottom-up) forecasts are scored.
  ## Output: a matrix of accuracy measures, one row per forecast series,
  ##         with the evaluated index span stored in attr "span".
  Test.data = subset(Test.data, index >= start & index <= end)
  if (panel) {
    fcast.cols = c("fcasth1a", "fcasth3a", "fcasth6a")
    row.labels = c("Aggh1", "Aggh3", "Aggh6")
  } else {
    # Direct forecasts interleaved with their aggregated counterparts.
    fcast.cols = c("fcasth1", "fcasth1a", "fcasth3", "fcasth3a", "fcasth6", "fcasth6a")
    row.labels = c("Directh1", "Aggh1", "Directh3", "Aggh3", "Directh6", "Aggh6")
  }
  # One accuracy() row per forecast column, stacked into a matrix.
  measure.AG = do.call(rbind, lapply(fcast.cols, function(cl) accuracy(Test.data[[cl]], Test.data$true)))
  rownames(measure.AG) = row.labels
  attr(measure.AG, "span") = paste(start, "-", end)
  measure.AG
}
fcastAcc.state = function(Test.data = Test.state, start = min(Test.data$index), end = max(Test.data$index)){
  ## Compute per-state forecast-accuracy measures (RMSE/MAPE) for horizons 1, 3 and 6.
  ## Input:
  #    Test.data: data frame with true values (true) and forecasts fcasth1/3/6,
  #               one row per state per index; defaults to Test.state.
  #    start/end: index range over which the measures are computed.
  ## Output: a data.frame with one row per state and columns
  ##         RMSEh1, MAPEh1, RMSEh3, MAPEh3, RMSEh6, MAPEh6;
  ##         the evaluated index span is stored in attr "span".
  Test.data = subset(Test.data, index >= start & index <= end)
  STATES = levels(factor(Test.data$State))
  # One State/RMSE/MAPE table per horizon, computed state by state.
  per.horizon = lapply(c(1, 3, 6), function(h) {
    fcast.col = paste0("fcasth", h)
    m = data.frame(State = STATES, RMSE = NA, MAPE = NA)
    for (s in STATES) {
      state.rows = Test.data[Test.data$State == s, ]
      m[m$State == s, c("RMSE", "MAPE")] =
        accuracy(state.rows[[fcast.col]], state.rows$true)[, c("RMSE", "MAPE")]
    }
    m
  })
  # Bind horizons side by side, keeping State from the first table only.
  measure.state = cbind(per.horizon[[1]], per.horizon[[2]][-1], per.horizon[[3]][-1])
  names(measure.state)[-1] = c("RMSEh1", "MAPEh1", "RMSEh3", "MAPEh3", "RMSEh6", "MAPEh6")
  attr(measure.state, "span") = paste(start, "-", end)
  measure.state
}
fcastAcc.sum = function(Test.data = Test.state, start = min(Test.data$index), end = max(Test.data$index)){
  ## Compute OVERALL forecast-accuracy measures across all states for horizons 1, 3 and 6.
  ## Input:
  #    Test.data: data frame with true values (true) and forecasts fcasth1/3/6;
  #               defaults to Test.state.
  #    start/end: index range over which the measures are computed.
  ## Output: a matrix with rows Overallh1/Overallh3/Overallh6 and columns
  ##         RMSE.All, MAPE.All plus median/max/min of the per-state measures;
  ##         the evaluated index span is stored in attr "span".
  ## FIX: the horizon-1 overall RMSE/MAPE previously omitted na.rm = TRUE
  ##      (unlike h3/h6), so any missing forecast turned them into NA; all
  ##      horizons now drop NAs consistently.
  Test.data = subset(Test.data, index >= start & index <= end)
  measure.state = fcastAcc.state(Test.data, start, end)
  # Summary row for a single horizon h.
  one.horizon = function(h) {
    err  = Test.data$true - Test.data[[paste0("fcasth", h)]]
    rmse = measure.state[[paste0("RMSEh", h)]]
    mape = measure.state[[paste0("MAPEh", h)]]
    c(RMSE.All = sqrt(mean(err^2, na.rm = TRUE)),                          # RMSE, all states pooled
      MAPE.All = mean(abs(err) / abs(Test.data$true), na.rm = TRUE) * 100, # MAPE, all states pooled
      RMSE.Md  = median(rmse), RMSE.Max = max(rmse), RMSE.Min = min(rmse),
      MAPE.Md  = median(mape), MAPE.Max = max(mape), MAPE.Min = min(mape))
  }
  measure.sum = rbind(one.horizon(1), one.horizon(3), one.horizon(6))
  rownames(measure.sum) = c("Overallh1", "Overallh3", "Overallh6")
  attr(measure.sum, "span") = paste(start, "-", end)
  measure.sum
}
# Function for plotting national level forecasts
# Plot the national-level true series against its direct and aggregated
# forecasts for one horizon.
#   data   : data frame with columns index, true, fcasth<h>, fcasth<h>a
#            (the Test.AG object from the forecasting step).
#   h      : forecast horizon (1, 3 or 6).
#   model  : model label for the plot title (defaults to the global model.name).
#   colors, shape, Lshape : colour, point symbol and line type for the
#            true / direct / aggregated series respectively.
#   panel  : if TRUE only the aggregated (bottom-up) forecast is drawn.
# Side effect: draws on the current graphics device; no return value.
# NOTE(review): the body indexes Panel with the global `Aggregate`, while the
# settings section defines `AGGREGATE` -- presumably the same object; confirm
# where `Aggregate` is defined.
plot.AG = function(data, h, model = model.name, colors = c("gray40", "blue", "red"),
                   shape = c(21,20, 18), Lshape = c("dotted","dotted", "dotted"), panel = FALSE){
  ## Data must contain index, true, fcast1,3,6.
  ## The input must be the data frame Test.AG
  #data = Test.AG; h =1; model.name = model.name; colors = c("gray40", "blue", "red");
  #shape = c(21,20, 19); Lshape = c("dotted","dotted", "dotted")
  if(!panel){
  # Direct + aggregated forecasts: drop rows where any series is missing.
  data = na.omit(data[,c("index", "true", paste0("fcasth", h), paste0("fcasth", h, "a"))])
  # index may arrive as a factor; recover its numeric value.
  if(is.factor(data$index)) start.index = min(as.numeric(levels(data$index)[data$index])) else
    if(is.numeric(data$index)) start.index = min(data$index)
  # Calendar date of the first plotted index, looked up in the Panel.
  start.Year = Panel[Panel$State == Aggregate & Panel$index == start.index, "Cal.Year"]
  start.Month = Panel[Panel$State == Aggregate & Panel$index == start.index, "Month"]
  true = ts(data[, "true"], start = c(start.Year, start.Month), f = 12)
  fcast = ts(data[, paste0("fcasth", h)], start = c(start.Year, start.Month), f = 12)
  fcasta = ts(data[, paste0("fcasth", h, "a")], start = c(start.Year, start.Month), f = 12)
  par(mar = c(4,4,3,1)+ 0.1)
  plot( true, type = "b", lty = Lshape[1], col = colors[1], pch = shape[1], xlab = "", ylab = "",
        main = paste("Forecasting National Level Applications:", model, "horizon =", h)
  )
  lines(fcast, type = "b", lty = Lshape[2], col = colors[2], pch = shape[2])
  lines(fcasta, type = "b", lty = Lshape[3], col = colors[3], pch = shape[3])
  #abline(h = 0, , lty = "dotted", col = "grey")
  legend("topleft", c("True", "Direct", "Aggregated"), lty = Lshape, col = colors, pch = shape)
  }
  ####
  if(panel){
    # Aggregated forecast only (no direct national forecast in the panel case).
    data = na.omit(data[,c("index", "true", paste0("fcasth", h, "a"))])
    if(is.factor(data$index)) start.index = min(as.numeric(levels(data$index)[data$index])) else
      if(is.numeric(data$index)) start.index = min(data$index)
    start.Year = Panel[Panel$State == Aggregate & Panel$index == start.index, "Cal.Year"]
    start.Month = Panel[Panel$State == Aggregate & Panel$index == start.index, "Month"]
    true = ts(data[, "true"], start = c(start.Year, start.Month), f = 12)
    fcasta = ts(data[, paste0("fcasth", h, "a")], start = c(start.Year, start.Month), f = 12)
    par(mar = c(4,4,3,1)+ 0.1)
    plot( true, type = "b", lty = Lshape[1], col = colors[1], pch = shape[1], xlab = "", ylab = "",
          main = paste("Forecasting National Level Applications:", model, "horizon =", h)
    )
    lines(fcasta, type = "b", lty = Lshape[3], col = colors[3], pch = shape[3])
    #abline(h = 0, , lty = "dotted", col = "grey")
    legend("topleft", c("True", "Aggregated"), lty = Lshape[c(1,3)], col = colors[c(1,3)], pch = shape[c(1,3)])
  }
}
# Function for plotting state level forecasts
# Plot one state's true series against its forecast for one horizon.
#   data  : rows of Test.state for a single state (columns index, true, fcasth<h>).
#   h     : forecast horizon (1, 3 or 6).
#   state : state label for the plot title.
#   model, colors, shape, Lshape : as in plot.AG.
# Side effect: draws on the current graphics device; no return value.
# NOTE(review): uses the global `Aggregate` to date the series (same caveat as
# plot.AG: the settings define `AGGREGATE` -- confirm).
plot.state = function(data, h, state, model = model.name, colors = c("gray40", "blue", "red"),
                      shape = c(21,20, 18), Lshape = c("dotted","dotted", "dotted")){
  ## Data must contain index, true, fcast1,3,6.
  ## The input must be the data frame Test.state[Test.state == i, ]
  data = na.omit(data[,c("index", "true", paste0("fcasth", h))])
  # index may arrive as a factor; recover its numeric value.
  if(is.factor(data$index)) start.index = min(as.numeric(levels(data$index)[data$index])) else
    if(is.numeric(data$index)) start.index = min(data$index)
  # Calendar date of the first plotted index, looked up in the Panel.
  start.Year = Panel[Panel$State == Aggregate & Panel$index == start.index, "Cal.Year"]
  start.Month = Panel[Panel$State == Aggregate & Panel$index == start.index, "Month"]
  true = ts(data[, "true"], start = c(start.Year, start.Month), f = 12)
  fcast = ts(data[, paste0("fcasth", h)], start = c(start.Year, start.Month), f = 12)
  par(mar = c(4,4,3,1)+ 0.1)
  plot( true, type = "b", lty = Lshape[1], col = colors[1], pch = shape[1], xlab = "", ylab = "",
        main = paste("Forecasting State Level Applications:", model, "horizon =", h, state))
  lines(fcast, type = "b", lty = Lshape[2], col = colors[2], pch = shape[2])
  #abline(h = 0, , lty = "dotted", col = "grey")
  legend("topleft", c("True", "Forecast"), lty = Lshape, col = colors, pch = shape)
}
## Defining conversion functions between index and date
# Convert a time index i into a c(year, month) pair on a monthly grid.
# Fix: the original hard-coded `max.INDEX`, `c(2000,10)` and 12 in the body,
# silently ignoring the i.max / start / f arguments; they are now honoured
# (defaults preserve the original behaviour).
i2d = function(i, i.min = 1, i.max = max.INDEX, start = c(2000,10), f = 12){
  t = ts(i.min:i.max, start = start, f = f)
  # Year part from the truncated decimal time; month part from the cycle.
  c(trunc(time(t))[t == i], cycle(t)[t == i])
}
# Convert a c(year, month) date into its position on the monthly index grid.
# Fix: as in i2d, the original ignored i.max / start / f and used the global
# `max.INDEX` and hard-coded origin; the arguments are now honoured
# (defaults preserve the original behaviour).
d2i = function(y,m, i.min = 1, i.max = max.INDEX, start = c(2000,10), f = 12){
  t = ts(i.min:i.max, start = start, f = f)
  # Single-point window at (y, m) picks out the matching index value.
  as.numeric(window(t, start = c(y,m), end = c(y,m)))
}
|
# Create a variable `lyric` that contains the text "I like to eat apples and bananas"
# Use the `substr()` function to extract the 1st through 13th letters from the `lyric`
# Use `?substr` to see more about this function
# Store the result in a variable called `intro`
# Use the `substr()` function to extract the 15th through the last letter of `lyric`
# Hint: use `nchar()` to determine how many letters there are!
# Store the result in a variable called `fruits`
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "ee".
# Hint: see http://www.endmemo.com/program/R/sub.php for a simple example (or use `?gsub`)
# Store the result in a variable called `fruits.e`
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "o".
# Store the result in a variable called `fruits.o`
# Create a new variable `lyric.e` that is the `intro` combined with the new `fruits.e` ending
# Print out this variable
# Print out the `intro` combined with the new `fruits.o` ending | /exercise 4/exercise.R | permissive | simran18/module6-functions | R | false | false | 1,010 | r | # Create a variable `lyric` that contains the text "I like to eat apples and bananas"
# Use the `substr()` function to extract the 1st through 13th letters from the `lyric`
# Use `?substr` to see more about this function
# Store the result in a variable called `intro`
# Use the `substr()` function to extract the 15th through the last letter of `lyric`
# Hint: use `nchar()` to determine how many letters there are!
# Store the result in a variable called `fruits`
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "ee".
# Hint: see http://www.endmemo.com/program/R/sub.php for a simpmle example (or use `?gsub`)
# Store the result in a variable called `fruits.e`
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "o".
# Store the result in a variable called `fruits.o`
# Create a new variable `lyric.e` that is the `intro` combined with the new `fruits.e` ending
# Print out this variable
# Print out the `intro` combined with the new `fruits.o` ending |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cvForecastExtraFunctions.R
\name{daysAgg}
\alias{daysAgg}
\title{Days Extra function}
\usage{
daysAgg(data, process, multiple = NULL, na.rm = FALSE)
}
\description{
Days Extra function
}
| /man/daysAgg.Rd | permissive | evandeilton/cvforecast | R | false | true | 266 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cvForecastExtraFunctions.R
\name{daysAgg}
\alias{daysAgg}
\title{Days Extra function}
\usage{
daysAgg(data, process, multiple = NULL, na.rm = FALSE)
}
\description{
Days Extra function
}
|
#' @title Powerlaw function for absolute growth rate
#'
#' @description
#' \code{PowerlawAGR} returns the absolute growth rate (dM/dt) following a
#' power law/allometric functional form, i.e. dM/dt = (r * M)^B.
#'
#' @param r allometric constant, a number or numeric vector
#' @param M biomass (kg/g), a number or numeric vector
#' @param B scaling exponent, a number or numeric vector
#' @return dM/dt, a number or numeric vector of the absolute growth rate
#'   following the power law
#'
#' @examples
#' # Change in mass over time
#' agr <- PowerlawAGR(0.25, 100, 0.4)
#' agr
#' # 3.62
#'
#' @references
#' C.E.T. Paine, T.R. Marthews, D.R. Vogt, D. Purves, M. Rees, A. Hector, and
#' L.A. Turnbull. 2012. How to fit nonlinear growth models and calculate growth
#' rates: an update for ecologists. Methods in Ecology and Evolution 3:245-256
PowerlawAGR <- function(r, M, B) {
  # dM/dt = (r * M)^B; vectorised elementwise over r, M and B.
  scaled_mass <- r * M
  scaled_mass^B
}
| /R/powerlaw.R | no_license | efeichtinger/plantgrowth | R | false | false | 895 | r | #' @title Powerlaw function for absolute growth rate
#'
#' @description
#' \code{PowerlawAGR} gives the absolute growth rate (dM/dt) under a
#' power law/allometric functional form: dM/dt = (r * M)^B.
#'
#' @param r allometric constant, a number or numeric vector
#' @param M biomass (kg/g), a number or numeric vector
#' @param B scaling exponent, a number or numeric vector
#' @return dM/dt, a number or numeric vector of the absolute growth rate
#'   following the power law
#'
#' @examples
#' # Change in mass over time
#' agr <- PowerlawAGR(0.25, 100, 0.4)
#' agr
#' # 3.62
#'
#' @references
#' C.E.T. Paine, T.R. Marthews, D.R. Vogt, D. Purves, M. Rees, A. Hector, and
#' L.A. Turnbull. 2012. How to fit nonlinear growth models and calculate growth
#' rates: an update for ecologists. Methods in Ecology and Evolution 3:245-256
PowerlawAGR <- function(r, M, B) {
  # Elementwise power-law growth rate; base arithmetic handles vectors.
  (r * M)^B
}
|
### packages needed for the App
packages <- c("shiny","shinyWidgets","shinyjs","leaflet","shinycssloaders",
              "htmltools","shinythemes","shinyBS","markdown",
              "rgdal","maptools","sp","spdep","cluster","fpc",
              "ClustGeo","devtools","rgeos")
## installing the required CRAN packages
install.packages(packages, quiet = TRUE)
## gpclib has to be built from source; only install it when it is missing.
## requireNamespace() checks availability without attaching the package
## (require() is a loading function and attaches on success -- not needed here).
if (!requireNamespace("gpclib", quietly = TRUE)) install.packages("gpclib", type = "source")
## NOTE(review): gpclibPermit() is not defined in this script; this assumes
## the package providing it (maptools) is attached before this line -- confirm.
gpclibPermit()
## installing the dev version of bsplus from GitHub
## (the devtools:: call is fully qualified, so attaching devtools via
## require() was unnecessary and has been dropped)
devtools::install_github("ijlyttle/bsplus")
| /newerhoods/setup.R | permissive | kaushik12/newerhoods | R | false | false | 596 | r | ### packages needed for the App
packages <- c("shiny","shinyWidgets","shinyjs","leaflet","shinycssloaders",
              "htmltools","shinythemes","shinyBS","markdown",
              "rgdal","maptools","sp","spdep","cluster","fpc",
              "ClustGeo","devtools","rgeos")
## installing the required CRAN packages
install.packages(packages, quiet = TRUE)
## gpclib must be built from source; install only when it is missing.
## requireNamespace() tests availability without attaching the package,
## which is the idiomatic replacement for `if (!require(...))`.
if (!requireNamespace("gpclib", quietly = TRUE)) install.packages("gpclib", type = "source")
## NOTE(review): gpclibPermit() is provided by maptools; this assumes
## maptools is attached before the script reaches this line -- confirm.
gpclibPermit()
## installing the dev version of bsplus from GitHub
## (devtools:: is fully qualified, so require(devtools) was redundant)
devtools::install_github("ijlyttle/bsplus")
|
# Build a document x topic 0/1 indicator matrix for the 10 Reuters topics of
# interest, from the "Topics" metadata of each document in `combinedCorpus`
# (a tm corpus defined earlier in this analysis -- confirm).
classNames=c("earn","acq","money-fx","grain","crude",
"trade","interest","ship","wheat","corn")
allClasses=matrix(data=0,nrow=length(combinedCorpus),ncol=length(classNames),
dimnames=list(c(1:length(combinedCorpus)),classNames))
for (i in 1:length(combinedCorpus)){
allClasses[i,match(tm::meta(combinedCorpus[[i]],tag="Topics"),classNames)]=1
}
# Document-term matrix restricted to the previously selected feature set
# `bestPerfFeats` (defined elsewhere -- confirm), densified for clustering.
DTM_combC<-DocumentTermMatrix(combinedCorpus)
y<-DTM_combC[,bestPerfFeats]
y<-as.data.frame(as.matrix(y))
# Hierarchical Agglomerative
distance<- dist(y, method = "euclidean") # or binary,canberra, maximum, manhattan
# NOTE(review): method="ward" is deprecated in hclust(); current R expects
# "ward.D" or "ward.D2" -- confirm which criterion was intended.
fit1 <- hclust(distance, method="ward")
groups <- cutree(fit1, k=10) # cut tree into 10 clusters
groups1 <- as.data.frame(groups)
plot(fit1, labels = NULL, hang = 0.1,
axes = TRUE, frame.plot = FALSE, ann = TRUE,
main = "Cluster Dendrogram",
sub = NULL, xlab = NULL, ylab = "Height") # display dendrogram
rect.hclust(fit1, k=10, border="red")
# Project onto the first two principal components, coloured by cluster id.
plot(prcomp(y)$x, col=groups, pch=20, cex=0.5,xlim =c(-3.5,10.5),ylim=c(-10,5))
# Elbow plot: within-group sum of squares for k = 1..nc k-means solutions,
# drawn on the current graphics device.
wssplot <- function(data, nc = 15, seed = 1234) {
  # k = 1: total WSS equals (n - 1) * sum of the column variances.
  wss <- (nrow(data) - 1) * sum(apply(data, 2, var))
  for (k in 2:nc) {
    set.seed(seed)  # reseed before every kmeans() call, as in the original
    wss[k] <- sum(kmeans(data, centers = k)$withinss)
  }
  plot(1:nc, wss, type = "b",
       xlab = "Number of Clusters",
       ylab = "Within groups sum of squares")
}
#df <- scale(y)
# Elbow plot to eyeball a reasonable k (wssplot defined above).
wssplot(y)
fit2 <- kmeans(y, 10)
# NOTE(review): fit2$cl relies on partial matching of fit2$cluster -- confirm;
# spelling it out ($cluster) would be clearer.
plot(prcomp(y)$x, col=fit2$cl,pch=20, cex=0.5,xlim =c(-3.5,10.5),ylim=c(-10,5))
# EM clustering
library(mclust)
ptm <- proc.time()
fit3 <- Mclust(y,G=10)
proc.time() - ptm # prints elapsed time of the Mclust fit
# NOTE(review): fit3$cl presumably partial-matches fit3$classification -- confirm.
plot(prcomp(y)$x, col=fit3$cl,pch=20, cex=0.5,xlim =c(-3.5,10.5),ylim=c(-10,5))
#summary(fit3) # display the best model
# Build a 10 x 12 cluster/topic composition table: for each cluster (row),
# the proportion of member documents carrying each of the 10 topic labels
# (cols 1-10), the proportion with no label at all ("others", col 11),
# and the cluster size ("total", col 12).
#
# groups:  data frame with a `groups` column of cluster ids in 1..10
# classes: 0/1 document-by-topic indicator matrix (>= 10 columns),
#          one row per document, aligned with `groups`
clustCMf <- function(groups, classes) {
  clustCM <- matrix(0, 10, 12)
  colnames(clustCM) <- c("earn", "acq", "money-fx", "grain", "crude", "trade",
                         "interest", "ship", "wheat", "corn", "others", "total")
  # Accumulate topic counts per cluster.  seq_len() is safe for a zero-row
  # `classes` (the original 1:dim(classes)[1] looped over c(1, 0) and failed).
  for (i in seq_len(nrow(classes))) {
    g <- groups$groups[i]
    if (sum(classes[i, ]) == 0) {
      clustCM[g, 11] <- clustCM[g, 11] + 1   # document with no topic label
    } else {
      clustCM[g, 1:10] <- clustCM[g, 1:10] + classes[i, 1:10]
    }
  }
  # Convert counts to within-cluster proportions and record cluster sizes.
  # An empty cluster yields NaN proportions (0/0), matching the original.
  for (i in 1:10) {
    n_i <- sum(groups$groups == i)
    clustCM[i, 1:11] <- clustCM[i, 1:11] / n_i
    clustCM[i, 12] <- n_i
  }
  clustCM
}
#cbind(trainingClasses,rep(0, length(trainingClasses) ))
# Cluster/topic composition tables for the three clusterings
# (hierarchical, k-means, EM), all via clustCMf() defined above.
table1<- clustCMf(groups1,allClasses)
# Wrap the k-means assignments in a data frame with the `groups` column
# clustCMf expects.
groups2<-as.data.frame(fit2$cl)
colnames(groups2)<-"groups"
table2<- clustCMf(groups2,allClasses)
# Same for the EM (Mclust) assignments.
groups3<-as.data.frame(fit3$cl)
colnames(groups3)<-"groups"
table3<- clustCMf(groups3,allClasses)
| /clusteringTests.R | no_license | jalewis472/CS909-Text-Classification | R | false | false | 2,568 | r | classNames=c("earn","acq","money-fx","grain","crude",
"trade","interest","ship","wheat","corn")
allClasses=matrix(data=0,nrow=length(combinedCorpus),ncol=length(classNames),
dimnames=list(c(1:length(combinedCorpus)),classNames))
for (i in 1:length(combinedCorpus)){
allClasses[i,match(tm::meta(combinedCorpus[[i]],tag="Topics"),classNames)]=1
}
DTM_combC<-DocumentTermMatrix(combinedCorpus)
y<-DTM_combC[,bestPerfFeats]
y<-as.data.frame(as.matrix(y))
# Hierarchical Agglomerative
distance<- dist(y, method = "euclidean") # or binary,canberra, maximum, manhattan
fit1 <- hclust(distance, method="ward")
groups <- cutree(fit1, k=10) # cut tree into 5 clusters
groups1 <- as.data.frame(groups)
plot(fit1, labels = NULL, hang = 0.1,
axes = TRUE, frame.plot = FALSE, ann = TRUE,
main = "Cluster Dendrogram",
sub = NULL, xlab = NULL, ylab = "Height") # display dendogram
rect.hclust(fit1, k=10, border="red")
plot(prcomp(y)$x, col=groups, pch=20, cex=0.5,xlim =c(-3.5,10.5),ylim=c(-10,5))
# K-means
# Elbow plot of within-group sum of squares for 1..nc k-means fits.
wssplot <- function(data, nc = 15, seed = 1234) {
  wss <- numeric(nc)
  # Single-cluster total WSS: (n - 1) times the summed column variances.
  wss[1] <- (nrow(data) - 1) * sum(apply(data, 2, var))
  for (i in 2:nc) {
    set.seed(seed)  # identical seed per fit, matching the original behaviour
    wss[i] <- sum(kmeans(data, centers = i)$withinss)
  }
  plot(1:nc, wss, type = "b",
       xlab = "Number of Clusters", ylab = "Within groups sum of squares")
}
#df <- scale(y)
wssplot(y)
fit2 <- kmeans(y, 10)
plot(prcomp(y)$x, col=fit2$cl,pch=20, cex=0.5,xlim =c(-3.5,10.5),ylim=c(-10,5))
# EM clustering
library(mclust)
ptm <- proc.time()
fit3 <- Mclust(y,G=10)
proc.time() - ptm
plot(prcomp(y)$x, col=fit3$cl,pch=20, cex=0.5,xlim =c(-3.5,10.5),ylim=c(-10,5))
#summary(fit3) # display the best model
# 10 x 12 cluster/topic composition table: per-cluster proportions of the 10
# topic labels (cols 1-10), proportion of unlabelled documents ("others",
# col 11), and cluster size ("total", col 12).
#
# groups:  data frame with a `groups` column of cluster ids in 1..10
# classes: 0/1 document-by-topic indicator matrix (>= 10 columns)
clustCMf <- function(groups, classes) {
  clustCM <- matrix(0, 10, 12)
  colnames(clustCM) <- c("earn", "acq", "money-fx", "grain", "crude", "trade",
                         "interest", "ship", "wheat", "corn", "others", "total")
  # seq_len() handles a zero-row `classes` safely; the original
  # 1:dim(classes)[1] produced c(1, 0) and indexed out of bounds.
  for (i in seq_len(nrow(classes))) {
    g <- groups$groups[i]
    if (sum(classes[i, ]) == 0) {
      clustCM[g, 11] <- clustCM[g, 11] + 1   # unlabelled document
    } else {
      clustCM[g, 1:10] <- clustCM[g, 1:10] + classes[i, 1:10]
    }
  }
  # Normalise counts by cluster size; empty clusters give NaN (0/0), as before.
  for (i in 1:10) {
    n_i <- sum(groups$groups == i)
    clustCM[i, 1:11] <- clustCM[i, 1:11] / n_i
    clustCM[i, 12] <- n_i
  }
  clustCM
}
#cbind(trainingClasses,rep(0, length(trainingClasses) ))
table1<- clustCMf(groups1,allClasses)
groups2<-as.data.frame(fit2$cl)
colnames(groups2)<-"groups"
table2<- clustCMf(groups2,allClasses)
groups3<-as.data.frame(fit3$cl)
colnames(groups3)<-"groups"
table3<- clustCMf(groups3,allClasses)
|
\name{tawny.types-package}
\alias{tawny.types-package}
\alias{tawny.types}
\alias{tawny.options}
\alias{Covariance}
\alias{Correlation}
\docType{package}
\title{Common types for tawny}
\description{
Base types used throughout tawny
}
\details{
\tabular{ll}{
Package: \tab tawny.types\cr
Type: \tab Package\cr
Version: \tab 1.1.3\cr
Date: \tab 2014-05-06\cr
License: \tab What license is it under?\cr
LazyLoad: \tab yes\cr
}
Create portfolio objects from these types
}
\author{
Brian Lee Yung Rowe
Maintainer: Brian Lee Yung Rowe <r@zatonovo.com>
}
\keyword{ package }
\seealso{ \code{\link[tawny]{tawny-package}} }
\examples{
\dontrun{
p <- TawnyPortfolio(c('AAPL','GOOG','IBM'), 150,200)
m <- BenchmarkPortfolio('^GSPC', 150,200)
}
}
| /man/tawny.types-package.Rd | no_license | IanMadlenya/tawny.types | R | false | false | 736 | rd | \name{tawny.types-package}
\alias{tawny.types-package}
\alias{tawny.types}
\alias{tawny.options}
\alias{Covariance}
\alias{Correlation}
\docType{package}
\title{Common types for tawny}
\description{
Base types used throughout tawny
}
\details{
\tabular{ll}{
Package: \tab tawny.types\cr
Type: \tab Package\cr
Version: \tab 1.1.3\cr
Date: \tab 2014-05-06\cr
License: \tab What license is it under?\cr
LazyLoad: \tab yes\cr
}
Create portfolio objects from these types
}
\author{
Brian Lee Yung Rowe
Maintainer: Brian Lee Yung Rowe <r@zatonovo.com>
}
\keyword{ package }
\seealso{ \code{\link[tawny]{tawny-package}} }
\examples{
\dontrun{
p <- TawnyPortfolio(c('AAPL','GOOG','IBM'), 150,200)
m <- BenchmarkPortfolio('^GSPC', 150,200)
}
}
|
# Teaching script (Oct 20 session): exploring the ANSUR body-measurement
# data with ggplot2 (plots) and dplyr (summaries).
library(tidyverse)
ansur_df <- read_csv("http://data.ntupsychology.net/ansur.csv")
# Make a scatterplot of weight by height
ggplot(ansur_df,
       aes(x = height, y = weight)
) + geom_point(size = 0.5, alpha = 0.5)
# Make a histogram of weight
ggplot(ansur_df,
       aes(x = weight)
) + geom_histogram(binwidth = 5, colour = 'white')
# Make a scatterplot of weight by height
# with colour coding of gender
# (same plot as above, but with the mapping/data given as named arguments)
ggplot(mapping = aes(x = height, y = weight, colour = gender),
       data = ansur_df) + geom_point(size = 0.5)
# Make a histogram of weight by gender
ggplot(ansur_df,
       aes(x = weight, fill = gender)
) + geom_histogram(binwidth = 5, colour = 'white')
# an example custom function (simple division, for illustration only)
f <- function(x, y) {x/y}
# Make a histogram of weight by height tercile
ggplot(ansur_df,
       aes(x = weight)
) + geom_histogram(binwidth = 5, colour = 'white') +
  facet_wrap(~height_tercile)
# calculate mean and sd of weight
# (summarize here is dplyr's verb, attached via tidyverse)
summarize(ansur_df,
          average = mean(weight),
          st_dev = sd(weight))
# calculate mean and sd of weight
# for each height tercile group
summarize(group_by(ansur_df, height_tercile),
          average = mean(weight),
          st_dev = sd(weight))
# calculate mean and sd of weight
# for each height tercile group (using the %>% )
group_by(ansur_df, height_tercile) %>%
  summarize(average = mean(weight),
            st_dev = sd(weight))
# calculate mean and sd of weight, and mean of height,
# for each height tercile group
summarize(group_by(ansur_df, height_tercile),
          average = mean(weight),
          st_dev = sd(weight),
          avg_height = mean(height))
| /scripts/oct20.R | no_license | mark-andrews/psyc30815-202021 | R | false | false | 1,644 | r | library(tidyverse)
ansur_df <- read_csv("http://data.ntupsychology.net/ansur.csv")
# Make a scatterplot of weight by height
ggplot(ansur_df,
aes(x = height, y = weight)
) + geom_point(size = 0.5, alpha = 0.5)
# Make a histogram of weight
ggplot(ansur_df,
aes(x = weight)
) + geom_histogram(binwidth = 5, colour = 'white')
# Make a scatterplot of weight by height
# with colour coding of gender
ggplot(mapping = aes(x = height, y = weight, colour = gender),
data = ansur_df) + geom_point(size = 0.5)
# Make a histogram of weight by gender
ggplot(ansur_df,
aes(x = weight, fill = gender)
) + geom_histogram(binwidth = 5, colour = 'white')
# an example custom function
f <- function(x, y) {x/y}
# Make a histogram of weight by height tercile
ggplot(ansur_df,
aes(x = weight)
) + geom_histogram(binwidth = 5, colour = 'white') +
facet_wrap(~height_tercile)
# calculate mean and sd of weight
summarize(ansur_df,
average = mean(weight),
st_dev = sd(weight))
# calculate mean and sd of weight
# for each height tercile group
summarize(group_by(ansur_df, height_tercile),
average = mean(weight),
st_dev = sd(weight))
# calculate mean and sd of weight
# for each height tercile group (using the %>% )
group_by(ansur_df, height_tercile) %>%
summarize(average = mean(weight),
st_dev = sd(weight))
# calculate mean and sd of weight, and mean of height,
# for each height tercile group
summarize(group_by(ansur_df, height_tercile),
average = mean(weight),
st_dev = sd(weight),
avg_height = mean(height))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/brainbrowser.R
\name{unify_data}
\alias{unify_data}
\title{Unify data}
\usage{
unify_data(fig, data = NULL)
}
\arguments{
\item{fig}{a layout}
\item{data}{the reference data}
}
\description{
Fill in missing layout data from a reference list of arguments
to `bb`
}
| /man/unify_data.Rd | no_license | cfhammill/rbrainbrowser | R | false | true | 343 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/brainbrowser.R
\name{unify_data}
\alias{unify_data}
\title{Unify data}
\usage{
unify_data(fig, data = NULL)
}
\arguments{
\item{fig}{a layout}
\item{data}{the reference data}
}
\description{
Fill in missing layout data from a reference list of arguments
to `bb`
}
|
# Auto-generated fuzzing test case (AFL-derived, per its path): drives the
# unexported dcurver:::ddc routine with degenerate inputs -- an empty phi
# vector and x values spanning extreme magnitudes, including subnormals.
testlist <- list(phi = numeric(0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142, 2.88358101657793e-242, 2.898978379072e+128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Invoke the internal entry point with the fuzzed argument list.
result <- do.call(dcurver:::ddc,testlist)
# Inspect the structure of the result (crash/type check only, no assertions).
str(result)
result <- do.call(dcurver:::ddc,testlist)
str(result) |
# jump start code
# to be used with R
# stacked area chart
# reference Chang book, starting pg 62 ("Making a Graph with Shaded Area")
# install and load needed packages
# NOTE(review): install.packages() runs on every execution; guarding with
# requireNamespace() checks would avoid reinstalling each time.
install.packages("readxl")
install.packages("lattice")
install.packages("ggplot2")
install.packages("reshape")
library(readxl)
library(lattice)
library(ggplot2)
library(reshape)
# set working directory
# NOTE(review): hard-coded setwd() makes the script non-portable; the CSV
# paths below assume this directory exists on the current machine.
setwd("~/Desktop/R/")
################### First Chart #####################
# load data set for number of users by device
# source data in csv file
vg_device <- read.csv("statistic_number-of-mobile-gamers-in-north-america-2014-2017-by-device.csv")
# examine the first five rows of the data set
head(vg_device)
# Preparing the data set for use
# Renaming the data columns
names(vg_device) <- c('Device', '2014', '2015', '2016', '2017')
# Combining the year columns into one column (wide -> long via reshape::melt)
vg_device2 <- melt(vg_device, id.vars=c('Device'),var='Year')
# Checking result
head(vg_device2)
# Changing data type of columns
# (Year arrives as a factor of "2014".."2017"; go through character so
# as.numeric yields the year values rather than factor codes)
str(vg_device2)
vg_device2$Device <- as.character(vg_device2$Device)
vg_device2$Year <- as.character(vg_device2$Year)
vg_device2$Year <- as.numeric(vg_device2$Year)
# Checking result
str(vg_device2)
# Area chart
# This chart was not used for the assignment
ggplot(vg_device2, aes(x=Year, y=value, fill=Device)) +
geom_area() +
scale_fill_manual(values=c('#78B138', '#B13878', '#3878B1')) +
theme_classic() +
labs(title="Number of Mobile Gamers by Device", subtitle="U.S. and Canada", y="Millions of Users", x="Year") +
theme(
plot.title = element_text(face="bold", size=18),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
axis.text.x = element_text(face="bold", size=16),
axis.text.y = element_text(face="bold", size=16),
legend.text = element_text(face="bold", size=12),
legend.title = element_text(face="bold", size=12))
# Line chart
# This chart was used for the assignment
ggplot(vg_device2, aes(x=Year, y=value, group=Device, color=Device)) +
geom_line(size=1.5) +
scale_color_manual(values=c('#78B138', '#B13878', '#3878B1')) +
theme_classic() +
labs(title="Number of Mobile Gamers by Device", subtitle="U.S. and Canada", y="Millions of Users", x="Year") +
theme(
plot.title = element_text(face="bold", size=18),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
axis.text.x = element_text(face="bold", size=16),
axis.text.y = element_text(face="bold", size=16),
legend.text = element_text(face="bold", size=12),
# legend.title = element_text(face="bold", size=12),
legend.title = element_blank(),
legend.position="bottom", legend.box = "horizontal")
################## Second Chart ######################
# load data set for consumer spending by segment
# source data in csv file
vg_segment <- read.csv("statistic_consumer-spending-on-gaming-in-the-us-from-2010-to-2018-by-segment.csv")
# examine the first five rows of the data set
head(vg_segment)
# Preparing the data set for use
# Combining the segment columns into one column (wide -> long via reshape::melt)
vg_segment2 <- melt(vg_segment, id.vars=c('Year'),var='Segment')
# Checking result
head(vg_segment2)
# Changing data type of columns
str(vg_segment2)
vg_segment2$Segment <- as.character(vg_segment2$Segment)
# Checking result
str(vg_segment2)
#changing the order in which the segments are plotted
vg_segment2$Segment <- factor(vg_segment2$Segment, levels=c('Content', 'Hardware', 'Accessories'))
# Area chart
# This chart was not used for the assignment
# (axis-label typo fixed: "Billon" -> "Billion")
ggplot(vg_segment2, aes(x=Year, y=value, fill=Segment)) +
  geom_area() +
  scale_fill_manual(values=c('#3878B1', '#78B138','#B13878')) +
  theme_classic() +
  labs(title="Consumer Spending on Gaming", subtitle="U.S. from 2010 to 2018", y="Billion U.S. Dollars", x="Year") +
  theme(
    plot.title = element_text(face="bold", size=18),
    axis.title.x = element_text(face="bold", size=14),
    axis.title.y = element_text(face="bold", size=14),
    axis.text.x = element_text(face="bold", size=16),
    axis.text.y = element_text(face="bold", size=16),
    legend.text = element_text(face="bold", size=12),
    legend.title = element_text(face="bold", size=12))
# Line chart
# This chart was used for the assignment
ggplot(vg_segment2, aes(x=Year, y=value, group=Segment, color=Segment)) +
  geom_line(size=1.5) +
  scale_color_manual(values=c('#3878B1', '#78B138','#B13878')) +
  theme_classic() +
  labs(title="Consumer Spending on Gaming", subtitle="U.S. from 2010 to 2018", y="Billion U.S. Dollars", x="Year") +
  theme(
    plot.title = element_text(face="bold", size=18),
    axis.title.x = element_text(face="bold", size=14),
    axis.title.y = element_text(face="bold", size=14),
    axis.text.x = element_text(face="bold", size=16),
    axis.text.y = element_text(face="bold", size=16),
    legend.text = element_text(face="bold", size=12),
    # legend.title = element_text(face="bold", size=12),
    legend.title = element_blank(),
    legend.position="bottom", legend.box = "horizontal")
| /Data Visualization Projects/Video Game Industry/Assignment_1_Wanat_455.R | no_license | jmwanat/data_analytics | R | false | false | 5,089 | r | # jump start code
# to be used with R
# stacked area chart
# reference Chang book, starting pg 62 ("Making a Graph with Shaded Area")
# install and load needed packages
install.packages("readxl")
install.packages("lattice")
install.packages("ggplot2")
install.packages("reshape")
library(readxl)
library(lattice)
library(ggplot2)
library(reshape)
# set working directory
setwd("~/Desktop/R/")
################### First Chart #####################
# load data set for number of users by device
# source data in csv file
vg_device <- read.csv("statistic_number-of-mobile-gamers-in-north-america-2014-2017-by-device.csv")
# examine the first five rows of the data set
head(vg_device)
# Preparing the data set for use
# Renaming the data columns
names(vg_device) <- c('Device', '2014', '2015', '2016', '2017')
# Combining the year columns into one column
vg_device2 <- melt(vg_device, id.vars=c('Device'),var='Year')
# Checking result
head(vg_device2)
# Changing data type of columns
str(vg_device2)
vg_device2$Device <- as.character(vg_device2$Device)
vg_device2$Year <- as.character(vg_device2$Year)
vg_device2$Year <- as.numeric(vg_device2$Year)
# Checking result
str(vg_device2)
# Area chart
# This chart was not used for the assignment
ggplot(vg_device2, aes(x=Year, y=value, fill=Device)) +
geom_area() +
scale_fill_manual(values=c('#78B138', '#B13878', '#3878B1')) +
theme_classic() +
labs(title="Number of Mobile Gamers by Device", subtitle="U.S. and Canada", y="Millions of Users", x="Year") +
theme(
plot.title = element_text(face="bold", size=18),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
axis.text.x = element_text(face="bold", size=16),
axis.text.y = element_text(face="bold", size=16),
legend.text = element_text(face="bold", size=12),
legend.title = element_text(face="bold", size=12))
# Line chart
# This chart was used for the assignment
ggplot(vg_device2, aes(x=Year, y=value, group=Device, color=Device)) +
geom_line(size=1.5) +
scale_color_manual(values=c('#78B138', '#B13878', '#3878B1')) +
theme_classic() +
labs(title="Number of Mobile Gamers by Device", subtitle="U.S. and Canada", y="Millions of Users", x="Year") +
theme(
plot.title = element_text(face="bold", size=18),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
axis.text.x = element_text(face="bold", size=16),
axis.text.y = element_text(face="bold", size=16),
legend.text = element_text(face="bold", size=12),
# legend.title = element_text(face="bold", size=12),
legend.title = element_blank(),
legend.position="bottom", legend.box = "horizontal")
################## Second Chart ######################
# load data set for consumer spending by segment
# source data in csv file
vg_segment <- read.csv("statistic_consumer-spending-on-gaming-in-the-us-from-2010-to-2018-by-segment.csv")
# examine the first five rows of the data set
head(vg_segment)
# Preparing the data set for use
# Combining the segment columns into one column
vg_segment2 <- melt(vg_segment, id.vars=c('Year'),var='Segment')
# Checking result
head(vg_segment2)
# Changing data type of columns
str(vg_segment2)
vg_segment2$Segment <- as.character(vg_segment2$Segment)
# Checking result
str(vg_segment2)
#changing the order in which the segments are plotted
vg_segment2$Segment <- factor(vg_segment2$Segment, levels=c('Content', 'Hardware', 'Accessories'))
# Area chart
# This chart was not used for the assignment
ggplot(vg_segment2, aes(x=Year, y=value, fill=Segment)) +
geom_area() +
scale_fill_manual(values=c('#3878B1', '#78B138','#B13878')) +
theme_classic() +
labs(title="Consumer Spending on Gaming", subtitle="U.S. from 2010 to 2018", y="Billon U.S. Dollars", x="Year") +
theme(
plot.title = element_text(face="bold", size=18),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
axis.text.x = element_text(face="bold", size=16),
axis.text.y = element_text(face="bold", size=16),
legend.text = element_text(face="bold", size=12),
legend.title = element_text(face="bold", size=12))
# Line chart
# This chart was used for the assignment
ggplot(vg_segment2, aes(x=Year, y=value, group=Segment, color=Segment)) +
geom_line(size=1.5) +
scale_color_manual(values=c('#3878B1', '#78B138','#B13878')) +
theme_classic() +
labs(title="Consumer Spending on Gaming", subtitle="U.S. from 2010 to 2018", y="Billon U.S. Dollars", x="Year") +
theme(
plot.title = element_text(face="bold", size=18),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
axis.text.x = element_text(face="bold", size=16),
axis.text.y = element_text(face="bold", size=16),
legend.text = element_text(face="bold", size=12),
# legend.title = element_text(face="bold", size=12),
legend.title = element_blank(),
legend.position="bottom", legend.box = "horizontal")
|
#' Create a simulator function of a GAM
#'
#' @param model
#' @param post a `function(x) {...}` applied to the output of the resulting
#' function
#' @return a closure of `newdata` and `newparms` that returns a vector of simulated
#' data from the fitted GAM model
#' @importFrom mgcv gam
#' @export
create_model_simulator <- function(model, post = identity){
fm <- rlang::expr_text(formula(model))
beta <- coef(model)
fam <- model[["family"]]
inv <- fam[["linkinv"]]
on.exit({ rm(model) })
function(newdata, parameter_update = function(b, x) { b }){
X <- mgcv::gam(as.formula(fm), data = newdata, family = fam, fit = FALSE)[["X"]]
beta <- parameter_update(b = beta, x = X)
post(inv(X %*% beta))
}
}
#' Create a simulator
#'
#' Composes an exposure simulator and an outcome simulator into one function
#' that, given a basis dataset, simulates the unit-level exposure `A`,
#' derives the boundary-weighted neighbor exposure `A_tilde`, and then
#' simulates the outcome `Y`.
#'
#' @param exposure_sim the result of `create_model_simulator` for the exposure
#' of interest
#' @param outcome_sim the result of `create_model_simulator` for the outcome
#' of interest
#' @export
create_simulator <- function(exposure_sim, outcome_sim){
force(exposure_sim)
force(outcome_sim)
function(newdata, exposure_newparms = NULL, outcome_newparms = NULL){
df <- newdata
# Simulate exposure first; both A_tilde and Y depend on it.
df[["A"]] <- exposure_sim(df, exposure_newparms)
df %>%
{
hold <- .
dplyr::mutate(
hold,
# Update treatment of neighbors: for each unit, collect the simulated
# A values of its neighbors (matched by pid).
tr_neighbors = purrr::map(
.x = neighbor_pids,
.f = ~ hold[["A"]][hold[["pid"]] %in% .x]
)
)
} %>%
dplyr::mutate(
# Proportion of boundary shared with treated units; defined as 0 when a
# unit has no neighbors or zero total boundary length.
A_tilde = purrr::map2_dbl(
.x = tr_neighbors,
.y = neighbor_boundary_lengths,
.f = function(x, y){
`if`(length(x) == 0 || sum(as.numeric(y)) == 0,
0, sum(x * y)/sum(y))
})
) ->
df
df[["Y"]] <- outcome_sim(df, outcome_newparms)
df
}
}
#' Creates a function which can produce the basis for simulation datasets
#'
#' @param basisdt a dataset containing `pid`, `geometry`, `neighbor_pids`,
#' `neighbor_boundary_lengths`
#' @return a function of (`seed_id`, `km_buffer`) returning the rows of
#'   `basisdt` that intersect or fall within a `km_buffer`-km buffer around
#'   the seed parcel, with neighbor bookkeeping restricted to those rows
#' @export
create_sim_basis_maker <- function(basisdt){
function(seed_id, km_buffer){
# Buffer the seed parcel's geometry by km_buffer kilometres.
loc <- sf::st_buffer(basisdt$geometry[basisdt$pid == seed_id, "geometry"],
units::as_units(km_buffer, "km"))
# Keep parcels that intersect or lie within the buffered region.
prd <-
drop(sf::st_intersects(basisdt$geometry, loc, sparse = FALSE)) |
drop(sf::st_within(basisdt$geometry, loc, sparse = FALSE))
basisdt[prd, ] %>%
{
hold <- .
hold %>%
dplyr::mutate(
# NOTE(review): neighbors_pids and neighbors are computed identically
# (row positions within the subset, not pid values); confirm whether
# neighbors_pids was intended to keep the pid values instead.
neighbors_pids = purrr::map(
.x = neighbor_pids,
.f = ~ which(hold$pid %in% .x)
),
neighbors = purrr::map(
.x = neighbor_pids,
.f = ~ which(hold$pid %in% .x)
),
# Trim boundary lengths to the neighbors that survived the subset.
neighbor_boundary_lengths = purrr::map2(
.x = neighbor_boundary_lengths,
.y = neighbor_pids,
.f = ~ .x[.y %in% hold[["pid"]]]
)
)
}
}
}
#' Create a simulation basis dataset
#'
#' Builds (or skips) the basis dataset for one (id, buffer) pair.
#' `checker(sha)` returning TRUE short-circuits the build (e.g. a cache hit);
#' `redirector(out, sha)` post-processes or stores the result.
#' @export
make_sim_basis_data <- function(basis_maker, id, buffer,
checker = function(hash) { FALSE },
redirector = function(out, hash) { out }){
# Content hash of the inputs, used as the cache key by checker/redirector.
sha <- digest::sha1(list(basis_maker, id, buffer))
if (checker(sha)) { return(invisible(NULL)) }
dt <-
basis_maker(seed_id = id, km_buffer = buffer) %>%
dplyr::select(-geometry) # drop spatial geometry; keep tabular data only
out <-
list(
data = dt,
sha = sha,
id = id,
buffer = buffer,
n = nrow(dt)
)
redirector(out, sha)
}
#' Create a simulation dataset
#'
#' Runs `simulator` on `basedt` (forwarding `parms`) and bundles the
#' simulated data with the means of exposure `A`, spillover exposure
#' `A_tilde`, and outcome `Y`, plus the parameters used.
#' @export
make_sim <- function(basedt, simulator,
                     parms = list(exposure_newparms = identity, outcome_newparms = identity)){
  call_args <- c(list(newdata = basedt), parms)
  simdt <- do.call(simulator, call_args)
  # Summary means of the three simulated columns.
  col_means <- lapply(simdt[c("A", "A_tilde", "Y")], mean)
  list(
    data = simdt,
    mean_A = col_means[["A"]],
    mean_A_tilde = col_means[["A_tilde"]],
    mean_Y = col_means[["Y"]],
    parms = parms
  )
}
#' Create a simulator function of a GAM
#'
#' @param model a fitted \code{mgcv::gam} model; only its formula,
#'   coefficients, and family (inverse link) are retained by the closure
#' @param post a `function(x) {...}` applied to the output of the resulting
#'   function
#' @return a closure of `newdata` and `parameter_update` that returns a vector
#'   of simulated data from the fitted GAM model
#' @importFrom mgcv gam
#' @export
create_model_simulator <- function(model, post = identity){
  # Capture just the pieces of the fit needed for later prediction.
  fm <- rlang::expr_text(formula(model))
  beta <- coef(model)
  fam <- model[["family"]]
  inv <- fam[["linkinv"]]
  # Drop the (potentially large) fitted model object from this constructor's
  # environment when it returns; the closure keeps only fm/beta/fam/inv.
  on.exit({ rm(model) })
  function(newdata, parameter_update = function(b, x) { b }){
    # Rebuild the design matrix for newdata; fit = FALSE avoids refitting.
    X <- mgcv::gam(as.formula(fm), data = newdata, family = fam, fit = FALSE)[["X"]]
    # Hook to perturb/replace coefficients (default: identity on b).
    beta <- parameter_update(b = beta, x = X)
    # Linear predictor mapped through the inverse link, then post-processed.
    post(inv(X %*% beta))
  }
}
#' Create a simulator
#'
#' @param exposure_sim the result of `create_model_simulator` for the exposure
#'    of interest
#' @param outcome_sim the result of `create_model_simulator` for the outcome
#'    of interest
#' @return a closure that simulates exposure `A`, derives the spillover
#'   exposure `A_tilde` (boundary-length-weighted proportion of treated
#'   neighbors), then simulates outcome `Y` on the augmented data
#' @export
create_simulator <- function(exposure_sim, outcome_sim){
  # Force evaluation so the simulator closures are fixed at creation time.
  force(exposure_sim)
  force(outcome_sim)
  # NOTE(review): these NULL defaults are forwarded as the `parameter_update`
  # argument of the model simulators, which call it as a function — confirm
  # callers always pass functions (e.g. `identity`) rather than NULL.
  function(newdata, exposure_newparms = NULL, outcome_newparms = NULL){
    df <- newdata
    # Simulate the exposure indicator for every unit.
    df[["A"]] <- exposure_sim(df, exposure_newparms)
    df %>%
      {
        # `hold` captures the frame so the map() lambda can index into it.
        hold <- .
        dplyr::mutate(
          hold,
          # Update treatment of neighbors
          tr_neighbors = purrr::map(
            .x = neighbor_pids,
            .f = ~ hold[["A"]][hold[["pid"]] %in% .x]
          )
        )
      } %>%
      dplyr::mutate(
        # Proportion of boundary shared with treated units
        A_tilde = purrr::map2_dbl(
          .x = tr_neighbors,
          .y = neighbor_boundary_lengths,
          .f = function(x, y){
            # Guard: isolated units or zero total boundary length get 0.
            `if`(length(x) == 0 || sum(as.numeric(y)) == 0,
                 0, sum(x * y)/sum(y))
          })
      ) ->
      df  # right-assignment: pipeline result back into df
    # Simulate the outcome using the augmented data (now has A and A_tilde).
    df[["Y"]] <- outcome_sim(df, outcome_newparms)
    df
  }
}
#' Creates a function which can produce the basis for simulation datasets
#'
#' @param basisdt a dataset containing `pid`, `geometry`, `neighbor_pids`,
#'  `neighbor_boundary_lengths`
#' @return a closure of `seed_id` and `km_buffer` returning the subset of
#'   `basisdt` within the buffer, with neighbor columns re-indexed to it
#' @export
create_sim_basis_maker <- function(basisdt){
  function(seed_id, km_buffer){
    # Buffer around the seed polygon's geometry, radius in kilometers.
    loc <- sf::st_buffer(basisdt$geometry[basisdt$pid == seed_id, "geometry"],
                         units::as_units(km_buffer, "km"))
    # Keep polygons that intersect or lie within the buffer. (st_within
    # implies st_intersects, so the `|` is belt-and-braces.)
    prd <-
      drop(sf::st_intersects(basisdt$geometry, loc, sparse = FALSE)) |
      drop(sf::st_within(basisdt$geometry, loc, sparse = FALSE))
    basisdt[prd, ] %>%
      {
        # `hold` captures the subset so map() lambdas can index into it.
        hold <- .
        hold %>%
          dplyr::mutate(
            # NOTE(review): `neighbors_pids` and `neighbors` below are
            # computed identically — one of the names looks like a typo;
            # confirm which column downstream code actually uses.
            neighbors_pids = purrr::map(
              .x = neighbor_pids,
              .f = ~ which(hold$pid %in% .x)
            ),
            # Row positions (within the subset) of each unit's neighbors.
            neighbors = purrr::map(
              .x = neighbor_pids,
              .f = ~ which(hold$pid %in% .x)
            ),
            # Drop boundary lengths for neighbors that fell outside the subset.
            neighbor_boundary_lengths = purrr::map2(
              .x = neighbor_boundary_lengths,
              .y = neighbor_pids,
              .f = ~ .x[.y %in% hold[["pid"]]]
            )
          )
      }
  }
}
#' Create a simulation basis dataset
#'
#' Builds the basis dataset for one simulation seed, keyed by a SHA-1 hash
#' of the inputs. `checker` can short-circuit the build (e.g. when a cached
#' result already exists) and `redirector` decides what is returned/stored.
#' @export
make_sim_basis_data <- function(basis_maker, id, buffer,
                                checker = function(hash) { FALSE },
                                redirector = function(out, hash) { out }){
  # Hash uniquely identifies this (maker, id, buffer) combination.
  sha <- digest::sha1(list(basis_maker, id, buffer))
  # Caller-supplied short-circuit: skip the (expensive) build entirely.
  if (checker(sha)) {
    return(invisible(NULL))
  }
  basis <- basis_maker(seed_id = id, km_buffer = buffer)
  dt <- dplyr::select(basis, -geometry)
  result <- list(
    data   = dt,
    sha    = sha,
    id     = id,
    buffer = buffer,
    n      = nrow(dt)
  )
  redirector(result, sha)
}
#' Create a simulation dataset
#'
#' Runs `simulator` on `basedt` (forwarding `parms` as extra arguments) and
#' returns the simulated data together with summary means of the exposure
#' `A`, the spillover exposure `A_tilde`, and the outcome `Y`.
#' @export
make_sim <- function(basedt, simulator,
                     parms = list(exposure_newparms = identity, outcome_newparms = identity)){
  sim_args  <- c(list(newdata = basedt), parms)
  simulated <- do.call(simulator, args = sim_args)
  out <- list(data = simulated)
  out$mean_A       <- mean(simulated$A)
  out$mean_A_tilde <- mean(simulated$A_tilde)
  out$mean_Y       <- mean(simulated$Y)
  out$parms        <- parms
  out
}
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/rbokeh-package.R
\name{\%>\%}
\alias{\%>\%}
\title{Pipe figures}
\arguments{
\item{lhs}{a Bokeh figure}
\item{rhs}{a layer to add to the figure}
}
\description{
Pipe figures
}
| /man/pipe.Rd | permissive | lhsego/rbokeh | R | false | false | 264 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/rbokeh-package.R
\name{\%>\%}
\alias{\%>\%}
\title{Pipe figures}
\arguments{
\item{lhs}{a Bokeh figure}
\item{rhs}{a layer to add to the figure}
}
\description{
Pipe figures
}
|
# Working
## This script:
# Loads a current .csv spreadsheet of logsheet information from the data center.
# Finds all the upem processed files in the dropbox and matches the data file names to the logsheet file names
# Checks a few things in the files before processing. If any of the errors below are encountered, information about the file is passed to a separate file, "problem_files":
## Does the logsheet info have a matching upem datafile? (If not, need to locate the data file. File "no_match" is generated to summarize these.)
## Are the dates/times in the data file recognizable? (If not, this might mistakenly be a raw file labeled as a pre-processed file)
## Does the first hepa session end after the data begins, and does the second session begin before the data file ends?
# The script then processes the micropem files. This code is copied from the shiny app. Right now it is only saving the summary and minute averages for each file, but could easily be tweaked to save plots, etc.
# It also creates a summary table of all the files that were run, and plots histograms for them.
# If R encounters an error while processing the file, the file is skipped. Information about the skipped file is passed to "problem_files".
# All generated files are saved to your default R directory
#### LOADING REQUIRED PACKAGES AND FUNCTIONS ####
# Use library() rather than require(): require() merely warns (and returns
# FALSE) when a package is missing, so the script would fail much later with
# a confusing error; library() stops immediately at the point of failure.
library(shiny)
library(ggplot2)
library(xts)
library(zoo)
library(plyr)
library(scales)
library(grid)
library(lubridate)
# Render numeric values as fixed two-decimal strings ("xx.xx") — the format
# the HEPA session times must have for xts time-window subsetting below.
timeformat <- function(x){
  sprintf("%.2f", x)
}
# Replace values that read "0.00" (ignoring surrounding whitespace) with NA.
#
# Bug fix: the original called gsub("\\s+", "0.00", x), which *inserts*
# "0.00" in place of each whitespace run instead of stripping whitespace,
# so padded values such as " 0.00" were never matched. Strip whitespace
# before comparing, mirroring blank2na() below.
zero2na <- function(x){
  z <- gsub("\\s+", "", x) # remove all whitespace before comparing
  x[z == "0.00"] <- NA
  return(x)
}
# Convert strings that are empty or consist only of whitespace to NA;
# all other values (including NA, which is left untouched) pass through.
blank2na <- function(x){
  is_blank <- grepl("^\\s*$", x) # TRUE for "" and pure-whitespace strings
  x[is_blank] <- NA
  x
}
#### SETTING COMPLIANCE THRESHOLD #####
# Minutewise accelerometer SD above this value counts as "device worn".
compliance_threshold <- 0.02
##### SETTING TIMEZONE #####
# All datetimes in this script are parsed/compared in this timezone.
timezone <- "GMT"
######### PROCESSING LOGSHEET INFO -----
### FIND THE DATA FILES ###
#create a vector of upem file names
files<-list.files("~/Dropbox/Ghana_exposure_data_SHARED (1)/",recursive=T,pattern="UGF[[:alnum:]]*_[[:alnum:]]*_", full.names=T) # this grabs the preprocessed upem files rather than the raw ones, plus the logsheets
logsheets <- files[grep("logsheet", files)] # separates out the logsheets
files <- files[!(files %in% logsheets)] # removes the logsheets from the files
length(files)
# get logsheet from data centre
logsheet_data <- read.csv("~/Dropbox/Ghana project/microPEM_logsheets_through_late_feb_2014.csv", stringsAsFactors = FALSE) # replace filepath with the latest file
# assemble required data from logsheet_data
# Mstudyid, Upemid, Filterid, Fieldsetd, Thepaon1, Thepaoff1, Pickupdtd, Upemun, NComment, Thepaon2, Thepaoff2, Comments
loginfo <- logsheet_data[, c(3, 16:17, 25, 27:29, 31:32, 35:36, 41, 13)]
# format the HEPA session times as needed for xts subsetting
# (columns 5:6 and 10:11 hold the HEPA on/off clock times; "0.00" means missing)
loginfo[, c(5:6, 10:11)] <- lapply(X = loginfo[, c(5:6, 10:11)], FUN = timeformat)
loginfo[, c(5:6, 10:11)] <- lapply(X = loginfo[, c(5:6, 10:11)], FUN = zero2na)
# Build xts "start/end" interval strings for each HEPA session.
# NOTE(review): the first mdy_hm() on each line omits tz = timezone; lubridate
# then defaults to UTC, which happens to equal "GMT" here — add the tz
# explicitly if the timezone setting ever changes.
loginfo$hepatimes1 <- paste0(mdy_hm(paste(loginfo$Fieldsetd, loginfo$Thepaon1)), "/", mdy_hm(paste(loginfo$Fieldsetd, loginfo$Thepaoff1), tz = timezone))
loginfo$hepatimes2 <- paste0(mdy_hm(paste(loginfo$Pickupdtd, loginfo$Thepaon2)), "/", mdy_hm(paste(loginfo$Pickupdtd, loginfo$Thepaoff2), tz = timezone))
# Any interval containing a failed parse carries the text "NA" — blank it out.
loginfo$hepatimes1[grep("NA",loginfo$hepatimes1)] <- NA
loginfo$hepatimes2[grep("NA",loginfo$hepatimes2)] <- NA
# matching files to loginfo
# Filter IDs are the 7 characters starting at "KHC" in each file path.
matched_files <- as.data.frame(files[substr(gsub("^.*KHC", "KHC", files), 1,7) %in% loginfo$Filterid])
matched_files[,2] <- substr(gsub("^.*KHC", "KHC", matched_files[,1]), 1,7)
colnames(matched_files) <- c("datafile", "Filterid")
loginfo <- merge(loginfo, matched_files, by = "Filterid", all = TRUE)
# set aside the unmatched datafiles
no_match <- loginfo[is.na(loginfo$datafile),]
no_match$problem <- "no matching datafile"
# remove the files with no match from loginfo
loginfo <- loginfo[!is.na(loginfo$datafile),]
# sort loginfo chronologically by date of pickup
loginfo <- loginfo[order(mdy(loginfo$Pickupdtd)),]
# ### Check alignment of HEPATIMES with data start/end times ###
# For each matched file: detect the date format (mdy vs dmy), record the
# data start/end times, and check that the logged HEPA sessions overlap the
# recorded data. Files whose dates parse in neither format are flagged later.
# NOTE this step takes quite a while, as it loads each data file in separately. Could alter the "i in" statement if you are only processing a subset of files rather than the whole lot of them.
for (i in 1:nrow(loginfo)) {
  filter <- loginfo$Filterid[i]
  data <- read.csv(as.character(loginfo$datafile[i]),col.names = c("Date","Time", "RH.Corrected Nephelometer" ,"Temp", "RH", "Battery", "Inlet.Press", "Flow.Orifice.Press", "Flow", "X.axis", "Y.axis", "Z.axis", "Vector.Sum.Composite", "Stop.Descriptions" ), header=F, sep=",", fill=T, stringsAsFactors=FALSE) # IMPORTING ACTUAL DATASET INTO R.
  data <- data[25:nrow(data),]                 # drop the instrument header block
  data.withoutdesc <- data[5:nrow(data), 1:13] # drop stop-description column
  data2 = data.frame(sapply(data.withoutdesc, blank2na))
  # Columns 3:13 arrive as factors; recover their numeric values via levels().
  for(j in 1:11){data2[,j+2] = as.numeric(levels(data2[,j+2]))[as.integer(data2[,j+2])]}
  data2$Date <- as.character(data2$Date)
  data2$Time <- as.character(data2$Time)
  data2$datetime <- paste(data2$Date, data2$Time)
  # Try month-day-year first, then day-month-year, on the first date cell.
  if (!is.na(mdy(data2[1,1]))) {
    loginfo$dateformat[i] <- "mdy"
    data2$datetime <- mdy_hms(data2$datetime, tz = timezone)
  } else if (!is.na(dmy(data2[1,1]))) {
    loginfo$dateformat[i] <- "dmy"
    data2$datetime <- dmy_hms(data2$datetime, tz = timezone)
    # (These two assignments are repeated below for both formats.)
    loginfo$data_start[i] <- data2$datetime[1]
    loginfo$data_end[i] <- data2$datetime[nrow(data2)-10]
  } else loginfo$dateformat[i] <- NA
  if (!is.na(loginfo$dateformat[i])) {
    loginfo$data_start[i] <- data2$datetime[1]
    loginfo$data_end[i] <- data2$datetime[nrow(data2)-10]
    # HEPA session 1 must end after the data begin; session 2 must start
    # before the data end (10-row margin for time-sync slack).
    loginfo$hepa1_ok[i] <- data2$datetime[10] < mdy_hm(paste(loginfo$Fieldsetd[i], loginfo$Thepaoff1[i]), tz = timezone)
    loginfo$hepa2_ok[i] <- data2$datetime[(nrow(data2)-10)] > mdy_hm(paste(loginfo$Pickupdtd[i], loginfo$Thepaon2[i]), tz = timezone)
  }
}
# set aside a data frame of data files that are probably raw rather than processed files & remove these from loginfo
nodateformat <- loginfo[is.na(loginfo$dateformat),]
nodateformat$problem <- "raw file?"
loginfo <- loginfo[!is.na(loginfo$dateformat),]
# set aside a data frame of data files that have a known hepa problem, & remove these from loginfo
hepa_problem <- loginfo[(loginfo$hepa1_ok == FALSE & !is.na(loginfo$hepa1_ok) | loginfo$hepa2_ok == FALSE & !is.na(loginfo$hepa2_ok)),]
hepa_problem$problem <- "hepa problems"
loginfo <- loginfo[!(loginfo$hepa1_ok == FALSE & !is.na(loginfo$hepa1_ok) | loginfo$hepa2_ok == FALSE & !is.na(loginfo$hepa2_ok)),]
# combine the problem files
problem_files <- rbind.fill(nodateformat, hepa_problem, no_match)
# Convert the numeric epoch values stored by the QC loop back to POSIXct.
loginfo$data_start <- as.POSIXct(loginfo$data_start, origin = "1970-1-1")
loginfo$data_end <- as.POSIXct(loginfo$data_end, origin = "1970-1-1")
problem_files$data_start <- as.POSIXct(problem_files$data_start, origin = "1970-1-1")
problem_files$data_end <- as.POSIXct(problem_files$data_end, origin = "1970-1-1")
# save loginfo and problem_files to file
# NOTE(review): the problem_files filename below has no ".csv" extension,
# unlike the loginfo file — confirm whether that is intentional.
write.csv(loginfo, file = paste0("loginfo_",Sys.Date(),".csv"), row.names = FALSE)
write.csv(problem_files, file = paste0("problem_files_", Sys.Date()), row.names = FALSE)
# TO DO - grep the study IDs out of "files" and look for matches with problem_files
no_match <- problem_files[is.na(problem_files$datafile),]
no_match$problem <- "no matching datafile"
no_match$datafile <- as.character(no_match$datafile)
# Fall back to matching on the 7-character study ID (starting at "BM") when
# the filter-ID match failed.
ID_matched_files <- as.data.frame(files[substr(gsub("^.*BM", "BM", files), 1,7) %in% no_match$Mstudyid], stringsAsFactors = FALSE)
ID_matched_files[,2] <- substr(gsub("^.*BM", "BM", ID_matched_files[,1]), 1,7)
colnames(ID_matched_files) <- c("datafile", "Mstudyid")
no_match$datafile[no_match$Mstudyid %in% ID_matched_files$Mstudyid] <- ID_matched_files$datafile
# 2 files found
# Print the number of files with problems
print(paste0(nrow(problem_files), " files have problems, see the file: problem_files_", Sys.Date()))
#### END LOGSHEET PROCESSING----
### PROCESS THE GOOD DATA --------
# Reload a previously saved loginfo plus manually fixed / still-broken
# logsheet rows, then deduplicate on Filterid before the main processing loop.
loginfo <- read.csv("~/Dropbox/Ghana project/Ghana R stuff/loginfo_2014-04-05.csv", stringsAsFactors = FALSE) # could be any log file here, make sure stringsAsFactors is set to FALSE
loginfo <- loginfo[,2:22] # drop the row-number column written by write.csv
fixed_files <- read.csv("~/Dropbox/Ghana project/Ghana R stuff/MicroPEM_logsheet_data_FIXED 2014-04-06.csv", stringsAsFactors = FALSE)
loginfo <- rbind.fill(loginfo, fixed_files)
not_fixed <- read.csv("~/Dropbox/Ghana project/Ghana R stuff/MicroPEM_problem_files_not_fixed 2014-04-06.csv", stringsAsFactors = FALSE)
loginfo <- rbind.fill(loginfo, not_fixed)
sum(duplicated(loginfo$Filterid)) # printed for a quick interactive check
loginfo <- loginfo[!duplicated(loginfo$Filterid),] # get rid of duplicates
### CREATING BLANK SUMMARY TABLE ###
# Accumulates one row of summary statistics per successfully processed file.
summary_table <- data.frame(stringsAsFactors = FALSE)
# Main per-file processing loop: for each logsheet row, load the matched
# MicroPEM file, strip the HEPA (filtered-air) sessions out, compute minute /
# hour / 24-hour averages and wearing-compliance metrics, apply the HEPA
# baseline correction, and write a per-file summary plus minute averages.
# Any error raised inside tryCatch() causes the file to be skipped (below).
for (n in 1:nrow(loginfo)) { # would be n in nrow(loginfo)
  ErrorHandler <- tryCatch({
    # Start/stop windows of the (up to two) HEPA sessions, as xts
    # "start/end" interval strings built earlier from the logsheet.
    HEPATIMES <- matrix(data = NA, nrow = 2, ncol = 1, byrow = TRUE)
    HEPATIMES[1,1] <- loginfo$hepatimes1[n]
    HEPATIMES[2,1] <- loginfo$hepatimes2[n]
    subject <- loginfo$Mstudyid[n]
    session <- paste0("S_", loginfo$Vround[n])
    filter <- loginfo$Filterid[n]
    window_width <- 10 # minutes in the rolling-mean compliance window
    Sys.setenv(TZ = timezone)
    data <- read.csv(as.character(loginfo$datafile[n]),col.names = c("Date","Time", "RH.Corrected Nephelometer" ,"Temp", "RH", "Battery", "Inlet.Press", "Flow.Orifice.Press", "Flow", "X.axis", "Y.axis", "Z.axis", "Vector.Sum.Composite", "Stop.Descriptions" ), header=F, sep=",", fill=T, stringsAsFactors=FALSE) # IMPORTING ACTUAL DATASET INTO R.
    data_header <- data[1:22,1:7] # instrument metadata block at top of file
    colnames(data_header) <- c("variable", "V1", "V2", "V3", "V4", "V5", "V6")
    # Serial number lives in header row 4; drop its first 6 chars, prefix "PM".
    serialnumber <- paste("PM", as.character(sub("^......", "", data_header[4,2])), sep = "")
    serialnumber_full <- data_header[4,2]
    data <- data[25:nrow(data),] # measurements start after the header block
    # Collect the non-blank "stop description" event rows with their date/time.
    stop.desc=data$Stop.Descriptions
    stop.desc= cbind(data[,c(1:2)],stop.desc)
    stop.desc$stop.desc = as.character(stop.desc$stop.desc)
    stop.desc = data.frame(sapply(stop.desc, blank2na), stringsAsFactors = FALSE)
    colnames(stop.desc) <- c("V1", "V2", "variable")
    stop.desc = stop.desc[complete.cases(stop.desc),]
    stop.desc <- stop.desc[,c(3,1,2)]
    data.withoutdesc <- data[5:nrow(data), 1:13]
    ######### CREATING DATA2 DATASET - ALL DATA INCLUDING HEPA SESSIONS, RH CORRECTED. #####
    data2 = data.frame(sapply(data.withoutdesc, blank2na))
    # Columns 3:13 arrive as factors; recover numeric values via levels().
    for(i in 1:11){data2[,i+2] = as.numeric(levels(data2[,i+2]))[as.integer(data2[,i+2])]}
    data2$Date <- as.character(data2$Date)
    data2$Time <- as.character(data2$Time)
    data2$datetime <- paste(data2$Date, data2$Time)
    # Date format for this file was determined in the earlier QC pass.
    if (loginfo$dateformat[n] == "mdy") data2$datetime <- mdy_hms(data2$datetime, tz = timezone)
    if (loginfo$dateformat[n] == "dmy") data2$datetime <- dmy_hms(data2$datetime, tz = timezone)
    data2$RH.Corrected.Nephelometer = ifelse(data2$RH <0, NA, data2$RH.Corrected.Nephelometer) # First removes RH corrected if RH < 0
    data2$RH[data2$RH < 0] = NA
    data2$unique_min <- floor_date(data2$datetime, unit = "minute")
    data2$unique_hour <- floor_date(data2$datetime, unit = "hour")
    ############# HEPA STUFF #########################
    days.xts <- as.xts(data2, order.by = data2$datetime, .RECLASS = TRUE)
    data2.HEPA1 <- matrix(nrow = 0, ncol = ncol(data2))
    data2.HEPA2 <- matrix(nrow = 0, ncol = ncol(data2))
    data2.HEPA3 <- matrix(nrow = 0, ncol = ncol(data2))
    #### INDEXING HEPA INTERVALS/ REMOVING HEPA SESSIONS FROM DATA ###
    NEWHEPATIMES <- HEPATIMES[!is.na(HEPATIMES)]
    for (i in 1:length(NEWHEPATIMES)) {
      # xts "start/end" string subsetting extracts each HEPA window.
      assign(paste("data2.HEPA",i, sep = ""), days.xts[as.character(NEWHEPATIMES[i])])
    }
    # Drop the rows falling inside any HEPA window from the active data.
    if (length(NEWHEPATIMES) ==1) {
      days.xts2 <- days.xts[!time(days.xts) %in% index(data2.HEPA1)]} else
    if (length(NEWHEPATIMES) ==2) {
      days.xts2 <- days.xts[!time(days.xts) %in% index(data2.HEPA1) & !time(days.xts) %in% index(data2.HEPA2)]} else
    if (length(NEWHEPATIMES) ==3) {
      days.xts2 <- days.xts[!time(days.xts) %in% index(data2.HEPA1) & !time(days.xts) %in% index(data2.HEPA2) & !time(days.xts) %in% index(data2.HEPA3)]
      # }
    } else
      days.xts2 <- days.xts
    #### CALCULATING AVERAGE NEPHELOMETER VALUES DURING HEPA SESSIONS (1ST AND LAST MINUTES TRIMMED TO ACCOUNT FOR POTENTIAL TIME SYNC MISMATCH) ####
    HEPA1.nephelometer <- NA
    HEPA2.nephelometer <- NA
    HEPA3.nephelometer <- NA
    if(!is.na(HEPATIMES[1,1])) {
      NEWHEPATIMES <- HEPATIMES[!is.na(HEPATIMES)]
      for (i in 1:length(NEWHEPATIMES)) {
        if (i >=1) {
          data2.HEPA1_trim = data2.HEPA1[!time(data2.HEPA1) %in% index(first(data2.HEPA1, '1 minute')) & !time(data2.HEPA1) %in% index(last(data2.HEPA1, '1 minute'))]
          data2.HEPA1_trim = as.data.frame(data2.HEPA1_trim, stringsAsFactors = FALSE)
          HEPA1.nephelometer = round(mean(as.numeric(data2.HEPA1_trim$RH.Corrected.Nephelometer), na.rm=TRUE), digits = 2)} else (HEPA1.nephelometer <- NA)
        if( i>=2) {
          data2.HEPA2_trim = data2.HEPA2[!time(data2.HEPA2) %in% index(first(data2.HEPA2, '1 minute')) & !time(data2.HEPA2) %in% index(last(data2.HEPA2, '1 minute'))]
          data2.HEPA2_trim = as.data.frame(data2.HEPA2_trim, stringsAsFactors = FALSE)
          HEPA2.nephelometer = round(mean(as.numeric(data2.HEPA2_trim$RH.Corrected.Nephelometer), na.rm=TRUE), digits = 2)} else (HEPA2.nephelometer <- NA)
      }}
    ### CREATING DATASET OF HEPA SESSION INFO ####
    data2.HEPA1 <- as.data.frame(data2.HEPA1, stringsAsFactors = FALSE)
    data2.HEPA2 <- as.data.frame(data2.HEPA2, stringsAsFactors = FALSE)
    data2.HEPA3 <- as.data.frame(data2.HEPA3, stringsAsFactors = FALSE)
    hepainfo <- rbind(data2.HEPA1, data2.HEPA2, data2.HEPA3)
    ###### CREATING "ACTIVE DATA" DATASET (HEPA SESSIONS REMOVED) #########
    active.data <- as.data.frame(days.xts2, stringsAsFactors = FALSE)
    ##### CALCULATING ACTIVE MINUTE AVERAGES #####
    active.minute.average = ddply(active.data, .(unique_min), summarise,
                                  RH.Corrected.Nephelometer = round(mean(as.numeric(RH.Corrected.Nephelometer), na.rm=TRUE), digits = 3),
                                  Temp = round(mean(as.numeric(Temp), na.rm=TRUE), digits = 3),
                                  RH = round(mean(as.numeric(RH), na.rm=TRUE), digits = 3),
                                  Battery = round(mean(as.numeric(Battery), na.rm=TRUE), digits = 3),
                                  Inlet.Press = round(mean(as.numeric(Inlet.Press), na.rm=TRUE), digits = 3),
                                  Flow.Orifice.Press = round(mean(as.numeric(Flow.Orifice.Press), na.rm=TRUE), digits = 3),
                                  Flow = round(mean(as.numeric(Flow), na.rm=TRUE), digits = 3),
                                  X.axis_mean = round(mean(as.numeric(X.axis), na.rm=TRUE), digits = 4),
                                  Y.axis_mean = round(mean(as.numeric(Y.axis), na.rm=TRUE), digits = 4),
                                  Z.axis_mean = round(mean(as.numeric(Z.axis), na.rm=TRUE), digits = 4),
                                  Vector.Sum.Composite_mean = round(mean(as.numeric(Vector.Sum.Composite), na.rm=TRUE), digits = 4),
                                  X.axis_SD = round(sd(X.axis, na.rm=TRUE), digits = 3),
                                  Y.axis_SD = round(sd(Y.axis, na.rm=TRUE), digits = 3),
                                  Z.axis_SD = round(sd(Z.axis, na.rm=TRUE), digits = 3),
                                  Vector.Sum.Composite_SD = round(sd(Vector.Sum.Composite, na.rm=TRUE), digits = 3),
                                  unique_hour = unique_hour[1])
    #### ADDING COMPLIANCE CRITERIA ###
    # A minute counts toward wearing compliance when its accelerometer SD
    # exceeds compliance_threshold (i.e. movement was detected).
    active.minute.average$sd_composite_above_threshold = ifelse(active.minute.average$Vector.Sum.Composite_SD > compliance_threshold, 1, 0)
    active.minute.average$sd_x_above_threshold = ifelse(active.minute.average$X.axis_SD > compliance_threshold, 1, 0)
    active.minute.average$sd_y_above_threshold = ifelse(active.minute.average$Y.axis_SD > compliance_threshold, 1, 0)
    active.minute.average$sd_z_above_threshold = ifelse(active.minute.average$Z.axis_SD > compliance_threshold, 1, 0)
    active.minute.average$sd_composite_rollmean <- as.numeric(rollapply(active.minute.average$sd_composite_above_threshold, width=window_width, FUN = mean, align = "center", na.rm = TRUE, fill = NA)) ## **** NOTE **** To change the width of the rolling mean window for compliance, change the parameter for "width" w.
    active.minute.average$compliance_rollmean <- ifelse(active.minute.average$sd_composite_rollmean > 0, 1, 0)
    if (sum(!is.na(active.minute.average$compliance_rollmean)) > 0) {
      active.minute.average.complete <- active.minute.average[complete.cases(active.minute.average),]
    } else {
      active.minute.average.complete <- active.minute.average
    }
    ### SUBSETTING INTO 24 HOUR PERIODS ###
    active.minute.average.complete$unique_min <- ymd_hms(active.minute.average.complete$unique_min, tz = timezone)
    no.days <- ceiling(as.numeric(as.duration(active.minute.average.complete$unique_min[nrow(active.minute.average.complete)] - active.minute.average.complete$unique_min[1]))/86400) # calculates the difference in time between last and first datetime observation (in seconds), transforms it into days and returns the ceiling of days
    dayindex <- active.minute.average.complete$unique_min[1] + hours(seq(from = 24, to = no.days*24, by = 24))
    active.minute.average.complete$unique_24h <- 1
    # Assign each minute to a 24-hour period counted from the first minute.
    for (i in 1:no.days) {
      active.minute.average.complete$unique_24h <- ifelse ((active.minute.average.complete$unique_min > dayindex[i]), i+1, active.minute.average.complete$unique_24h)
    }
    #### CALCULATING HOUR AVERAGES ####
    active.hour.average = ddply(active.minute.average.complete, .(unique_hour), summarise,
                                RH.Corrected.Nephelometer = round(mean(RH.Corrected.Nephelometer, na.rm=TRUE), digits = 3),
                                Temp = round(mean(Temp, na.rm=TRUE), digits = 3),
                                RH = round(mean(RH, na.rm=TRUE), digits = 3),
                                Battery = round(mean(Battery, na.rm=TRUE), digits = 3),
                                Inlet.Press = round(mean(Inlet.Press, na.rm=TRUE), digits = 3),
                                Flow.Orifice.Press = round(mean(Flow.Orifice.Press, na.rm=TRUE), digits = 3),
                                Flow = round(mean(Flow, na.rm=TRUE), digits = 3),
                                count_composite_above_threshold = sum(sd_composite_above_threshold, na.rm=TRUE),
                                percent_composite_above_threshold = round(mean(sd_composite_above_threshold, na.rm=TRUE), digits = 3),
                                x_above_threshold = sum(sd_x_above_threshold, na.rm=TRUE),
                                x_percent_above_threshold = round(mean(sd_x_above_threshold, na.rm=TRUE), digits = 3),
                                y_above_threshold = sum(sd_y_above_threshold, na.rm=TRUE),
                                y_percent_above_threshold = round(mean(sd_y_above_threshold, na.rm=TRUE), digits = 3),
                                z_above_threshold = sum(sd_z_above_threshold, na.rm=TRUE),
                                z_percent_above_threshold = round(mean(sd_z_above_threshold, na.rm=TRUE), digits = 3),
                                total_minutes_observation = length(unique_min),
                                proportion_compliance_rollmean = round(sum(compliance_rollmean, na.rm = TRUE)/60, digits = 3),
                                datetime = unique_min[1],
                                unique_24h = unique_24h[1])
    ###### CALCULATING 24-HOUR AVERAGES #####
    active.day.average = ddply(active.minute.average.complete, .(unique_24h), summarise,
                               RH.Corrected.Nephelometer = mean(RH.Corrected.Nephelometer, na.rm=TRUE),
                               Temp = mean(Temp, na.rm=TRUE),
                               RH = mean(RH, na.rm=TRUE),
                               Battery = mean(Battery, na.rm=TRUE),
                               Inlet.Press = mean(Inlet.Press, na.rm=TRUE),
                               Flow.Orifice.Press = mean(Flow.Orifice.Press, na.rm=TRUE),
                               Flow = mean(Flow, na.rm=TRUE),
                               count_composite_above_threshold = sum(sd_composite_above_threshold, na.rm=TRUE),
                               percent_composite_above_threshold = mean(sd_composite_above_threshold, na.rm=TRUE),
                               x_above_threshold = sum(sd_x_above_threshold, na.rm=TRUE),
                               x_percent_above_threshold = mean(sd_x_above_threshold, na.rm=TRUE),
                               y_above_threshold = sum(sd_y_above_threshold, na.rm=TRUE),
                               y_percent_above_threshold = mean(sd_y_above_threshold, na.rm=TRUE),
                               z_above_threshold = sum(sd_z_above_threshold, na.rm=TRUE),
                               z_percent_above_threshold = mean(sd_z_above_threshold, na.rm=TRUE),
                               hours_compliance_rollmean = round(sum((compliance_rollmean)/60, na.rm = TRUE), digits = 2),
                               total_minutes_observation = length(unique_min),
                               total_hours_observation = round(length(unique_min)/60, digits = 1),
                               datetime = unique_min[1])
    ####### MINUTE AVERAGE DATA ############################
    active.minute.average.complete$Subject <- rep(subject)
    active.minute.average.complete$Session <- rep(session)
    # active.minute.average.complete <- active.minute.average.complete[,c(25:26, 1:24)]
    ####### HOUR AVERAGE DATA ############################
    if (sum(!is.na(active.hour.average$x_percent_above_threshold))> 0) {
      active.hour.average.complete <- active.hour.average[complete.cases(active.hour.average),]
    } else {
      active.hour.average.complete <- active.hour.average
    }
    active.hour.average.complete$Subject <- rep(subject)
    active.hour.average.complete$Session <- rep(session)
    ##### ADDING HEPA CORRECTION TO NEPHELOMETER READINGS ####
    ## NOTE: CURRENTLY ONLY SET UP FOR ONE OR TWO HEPA SESSIONS ###
    # With one HEPA session the baseline offset is constant; with two the
    # offset is interpolated linearly across the sampling days.
    active.day.average$HEPA_corr_neph <- NA
    active.minute.average.complete$HEPA_corr_neph <- NA
    active.hour.average.complete$HEPA_corr_neph <- NA
    # new
    if (length(NEWHEPATIMES) ==1) {
      HEPA_correction <- round(HEPA1.nephelometer, digits = 2)
      active.day.average$HEPA_correction <- HEPA_correction
      active.hour.average.complete$HEPA_correction <- HEPA_correction
      active.minute.average.complete$HEPA_correction <- HEPA_correction
      active.day.average$HEPA_corr_neph <- round(active.day.average$RH.Corrected.Nephelometer - active.day.average$HEPA_correction, digits = 2)
      active.hour.average.complete$HEPA_corr_neph <- round(active.hour.average.complete$RH.Corrected.Nephelometer - active.hour.average.complete$HEPA_correction, digits = 3)
      active.minute.average.complete$HEPA_corr_neph <- round(active.minute.average.complete$RH.Corrected.Nephelometer - active.minute.average.complete$HEPA_correction, digits = 3)
    } else
    # end new
    if (length(NEWHEPATIMES) ==2) {
      HEPA_correction <- seq(HEPA1.nephelometer, HEPA2.nephelometer, length.out= nrow(active.day.average)) # length = number of days sampled
      active.day.average$HEPA_correction <- round(HEPA_correction, digits = 2)
      for (i in 1:nrow(active.day.average)) {
        active.day.average$HEPA_corr_neph[i] <- round(active.day.average$RH.Corrected.Nephelometer[i] - HEPA_correction[i], digits = 2) # why not just subtract the vectors?
        active.minute.average.complete$HEPA_correction[active.minute.average.complete$unique_24h ==i] <- round(HEPA_correction[i], digits = 2) # sets up one HEPA correction value per 24 hours
        active.minute.average.complete$HEPA_corr_neph <- round(active.minute.average.complete$RH.Corrected.Nephelometer - active.minute.average.complete$HEPA_correction, digits = 3)
        active.hour.average.complete$HEPA_correction[active.hour.average.complete$unique_24h==i] <- round(HEPA_correction[i], digits = 2)
        active.hour.average.complete$HEPA_corr_neph <- round(active.hour.average.complete$RH.Corrected.Nephelometer - active.hour.average.complete$HEPA_correction, digits = 3)
      }} else
      { active.day.average$HEPA_correction <- NA
        active.minute.average.complete$HEPA_correction <- NA
        active.hour.average.complete$HEPA_correction <- NA
      }
    ### NEW ###
    ### CALCULATING DELTA PRESSURE ###
    # Deposition rate uses HEPA-corrected readings when available, otherwise
    # the RH-corrected nephelometer values.
    if(!is.na(active.minute.average.complete$HEPA_corr_neph[1])) {
      dep_rate <- active.minute.average.complete$HEPA_corr_neph * active.minute.average.complete$Flow * 1/1000 } else
      { dep_rate <- active.minute.average.complete$RH.Corrected.Nephelometer * active.minute.average.complete$Flow * 1/1000 }
    active.minute.average.complete$cumulative_dep <- cumsum(dep_rate)
    active.hour.average.complete$cumulative_dep <- round(tapply(X = active.minute.average.complete$cumulative_dep, INDEX = active.minute.average.complete$unique_hour, FUN = mean), digits = 3)
    active.day.average$cumulative_dep <- round(tapply(X = active.minute.average.complete$cumulative_dep, INDEX = active.minute.average.complete$unique_24h, FUN = mean), digits = 2)
    active.minute.average.complete$delta_pressure <- round(active.minute.average.complete$Inlet.Press - active.minute.average.complete$Inlet.Press[1], digits = 3)*2
    # note - multiplying the Inlet Pressure x2 as per RTI
    active.hour.average.complete$delta_pressure <- round(tapply(X = active.minute.average.complete$delta_pressure, INDEX = active.minute.average.complete$unique_hour, FUN = mean), digits = 3)
    active.day.average$delta_pressure <- round(tapply(X = active.minute.average.complete$delta_pressure, INDEX = active.minute.average.complete$unique_24h, FUN = mean), digits = 2)
    active.day.average$unique_24h <- paste("Day", active.day.average$unique_24h)
    active.day.average$proportion_compliance_all <- NA
    active.day.average$proportion_compliance_all <- ifelse(active.day.average$total_hours_observation ==0, NA, round(active.day.average$hours_compliance_rollmean/active.day.average$total_hours_observation, digits = 3))
    active.day.average$proportion_compliance_all <- ifelse(is.na(active.day.average$percent_composite_above_threshold), NA, active.day.average$proportion_compliance_all)
    ################### SUMMARY DATA ###########################
    ##### TOTAL RUN TIME ####################
    total_time <- sum(active.day.average$total_hours_observation)
    total_time_minutes <- sum(active.day.average$total_minutes_observation)
    total_minutes_worn <- sum(active.minute.average.complete$compliance_rollmean, na.rm = TRUE)
    ### START DATE & TIME ###
    start_time = format(active.minute.average.complete$unique_min[1], format = "%d%b%y %H:%M:%S")
    ### STOP DATE & TIME ###
    stop_time = format(active.minute.average.complete$unique_min[nrow(active.minute.average.complete)], format = "%d%b%y %H:%M:%S") # due to NAs at end of rolling mean for compliance, last minutes will be truncated
    #### GENERATING ID FOR FILENAMES #####
    lastdate <- substr(stop_time,1,7)
    ID <- paste(subject, lastdate, session, serialnumber, filter, sep = "_")
    #### AVERAGE ACTIVE SAMPLE NEPHELOMETER ####
    average_sample_nephelometer = round(mean(as.numeric(active.data$RH.Corrected.Nephelometer), na.rm=TRUE), digits = 2)
    average_sample_nephelometer_hepacorr <- round(mean(active.day.average$HEPA_corr_neph, na.rm= TRUE), digits = 2)
    ###### VOLTAGE DROP PER HOUR #####
    # (Vb-Ve)*1000 mV/V ÷ (hours ran/2) (adjust for 50% duty cycle)- this number should be < 30 mV/hr for best pumps
    voltage_b <- active.hour.average.complete$Battery[1]
    # NOTE(review): length() of a data frame is its number of COLUMNS, so the
    # next line reads Battery[ncol(...)], not the final hour's value — likely
    # intended nrow(active.hour.average.complete); confirm before relying on
    # voltage_drop.
    voltage_e <- active.hour.average.complete$Battery[length(active.hour.average.complete)]
    voltage_drop <- (voltage_b-voltage_e)*1000 / (total_time/2)
    #### DATA SUMMARY #######
    # active.data_summary = matrix(c(as.character(filter),
    #                                serialnumber_full,
    #                                round(total_time),
    #                                round(total_time_minutes),
    #                                as.character(start_time),
    #                                as.character(stop_time),
    #                                timezone,
    #                                round(average_sample_nephelometer, digits = 2),
    #                                average_sample_nephelometer_hepacorr,
    #                                HEPATIMES,
    #                                round(HEPA1.nephelometer, digits = 2),
    #                                round(HEPA2.nephelometer, digits = 2),
    #                                compliance_threshold,
    #                                sum(active.minute.average$sd_composite_above_threshold, na.rm=TRUE),
    #                                round(total_minutes_worn/60),
    #                                round(mean(active.day.average$proportion_compliance_all, na.rm = TRUE)*100, digits =1),
    #                                round(voltage_drop, digits = 2)),
    #                              ncol = 1)
    #
    #
    # active.data_summary = data.frame(active.data_summary, stringsAsFactors = FALSE)
    # active.data_summary$variable = c("Filter", "Serialnumber", "Total Sampling Time (hrs)", "Total Sampling Time (mins)", "Start Time", "Stop Time", "Timezone", "Mean Active Nephelometer (ug/m^3)", "Mean Active Neph, HEPA corr (ug/m^3)", "HEPA1_times (start/stop)", "HEPA2_times (start/stop)",
    #                                  "Mean HEPA1 Nephelometer (ug/m^3)", "Mean HEPA2 Nephelometer (ug/m^3)", "Compliance Threshold for minutewise SD",
    #                                  "Total Time Composite SD>Threshold (mins)", "Total Hours Worn (hrs)", "Percent of Hours Worn (%)", "Avg Voltage Drop per Hour (mV/hr)")
    # colnames(active.data_summary)[1] <- "V1"
    # One-row wide summary for this file; also feeds the cross-file summary_table.
    active.data_summary2 = data.frame("Filter" = as.character(filter),
                                      "Serialnumber" = serialnumber_full,
                                      "Total Sampling Time.hrs" = round(total_time),
                                      "Total Sampling Time.mins" = round(total_time_minutes),
                                      "Start Time" = as.character(start_time),
                                      "Stop Time" = as.character(stop_time),
                                      "Timezone" = timezone,
                                      "Mean Active Nephelometer.ug/m^3" = round(average_sample_nephelometer, digits = 2),
                                      "Mean Active Neph HEPA corr.ug/m^3" = average_sample_nephelometer_hepacorr,
                                      "HEPA1_times.start/stop" = HEPATIMES[1,1],
                                      "HEPA2_times.start/stop" = HEPATIMES[2,1],
                                      "Mean HEPA1 Nephelometer.ug/m^3" = round(HEPA1.nephelometer, digits = 2),
                                      "Mean HEPA2 Nephelometer.ug/m^3" = round(HEPA2.nephelometer, digits = 2),
                                      "Compliance Threshold for minutewise SD" = compliance_threshold,
                                      "Total Time Composite SD over Threshold.mins" = sum(active.minute.average$sd_composite_above_threshold, na.rm=TRUE),
                                      "Total Hours Worn.hrs" = round(total_minutes_worn/60),
                                      "Percent of Hours Worn" = round(mean(active.day.average$proportion_compliance_all, na.rm = TRUE)*100, digits =1),
                                      "Avg Voltage Drop.mV/hr" = round(voltage_drop, digits = 2))
    # Long (variable/value) layout of the same summary for the output file.
    active.data_summary <- data.frame("variable" = colnames(active.data_summary2), "V1" = t(active.data_summary2))
    # Transposed per-24h summary with human-readable row labels.
    summary_24 <- as.matrix(t(active.day.average))
    summary_24[2:18,] <- round(as.numeric(summary_24[2:18,]), digits = 2)
    summary_24 <- as.data.frame(summary_24, stringsAsFactors = FALSE)
    summary_24$variable <- as.character(colnames(active.day.average))
    summary_24 <- summary_24[c(1,20,2, 22,21,3:19,23),]
    summary_24$variable <- c("Unique 24-hour period", "24-hour period start date", "RH-Corrected Nephelometer (ug/m^3)", "HEPA Correction (ug/m^3)", "HEPA-Corrected Nephelometer (ug/m^3)", "Temp (C)", "RH (%)", "Battery (V)",
                             "Inlet.Pressure (H20)",
                             "Flow.Orifice.Pressure (H20)",
                             "Flow (Lpm)",
                             "count_composite_above_threshold (mins)",
                             "percent_composite_above_threshold (%)",
                             "x_above_threshold (mins)",
                             "x_percent_above_threshold (%)",
                             "y_above_threshold(mins)",
                             "y_percent_above_threshold (%)",
                             "z_above_threshold (mins)",
                             "z_percent_above_threshold (%)",
                             "Hours of Compliance (hrs, by rolling mean)",
                             "Active Sampling Minutes (mins)",
                             "Active Sampling Hours (hrs)",
                             "Percent of Hours Worn (%)")
    # Section banner rows for the combined summary CSV.
    labels_setup <- as.data.frame(t(rep("**SETUP**", 7)), stringsAsFactors = FALSE)
    colnames(labels_setup)[7] <- "variable"
    labels_summary <- as.data.frame(t(rep("**OVERALL.SUMMARY**", 7)), stringsAsFactors = FALSE)
    colnames(labels_summary)[7] <- "variable"
    labels_desc <- as.data.frame(t(rep("**EQUIPMENT.LOG**", 7)), stringsAsFactors = FALSE)
    colnames(labels_desc)[7] <- "variable"
    labels_24 <- as.data.frame(t(rep("**24.HOUR.SUMMARY**",7)), stringsAsFactors = FALSE)
    colnames(labels_24)[7] <- "variable"
    # Stack all sections into one long summary table.
    # (NOTE(review): `summary` shadows base::summary within this loop.)
    summary <- rbind.fill( labels_summary[,c(7,1:6)], active.data_summary, labels_24,summary_24, labels_setup, data_header,labels_desc, stop.desc)
    # summary$Permanent_ID <- rep(permID)
    summary$Subject <- rep(subject)
    summary$Session <- rep(session)
    summary <- summary[,c(8,9, 1:7)]
  },
  error = function(e) e
  )
  # Skip files that errored anywhere above; report which filter failed.
  if(inherits(ErrorHandler, "error")) {
    print(paste0("Not processed due to errors: ", filter))
    next }
  # save the summary
  write.csv(summary, file = paste0(ID,"_MicroPEM_Summary.csv"))
  # save the minute data
  write.csv(active.minute.average.complete, file = paste0(ID, "_Data_Minute_Averages.csv"), row.names = F)
  # add by-day compliance numbers to the summary
  by_day_compliance <- as.data.frame(summary_24[20,1:(length(summary_24[20,]) - 1)])
  for (i in 1:ncol(by_day_compliance)) {
    colnames(by_day_compliance)[i] <- paste0("Day", i, "Compliance.hrs")
  }
  summary_table_n <- cbind(active.data_summary2, by_day_compliance)
  summary_table <- rbind.fill(summary_table, summary_table_n)
}
colnames(summary_table) <- c("Filter", "Serialnumber", "Total Sampling Time (hrs)", "Total Sampling Time (mins)", "Start Time", "Stop Time", "Timezone", "Mean Active Nephelometer (ug/m^3)", "Mean Active Neph, HEPA corr (ug/m^3)", "HEPA1_times (start/stop)", "HEPA2_times (start/stop)", "Mean HEPA1 Nephelometer (ug/m^3)", "Mean HEPA2 Nephelometer (ug/m^3)", "Compliance Threshold for minutewise SD",
"Total Time Composite SD>Threshold (mins)", "Total Hours Worn (hrs)", "Percent of Hours Worn (%)", "Avg Voltage Drop per Hour (mV/hr)", "Day1Compliance.hrs", "Day2Compliance.hrs", "Day3Compliance.hrs")
not_processed <- loginfo[!loginfo$Filterid %in% summary_table$Filter,]
not_processed$problem <- "not processed"
problem_files <- rbind(problem_files, not_processed)
### DO THESE PARTS ONLY WHEN YOU WANT TO SAVE THE TABLES, give appropriate filenames ----
# save the problem files
write.csv(problem_files, file = paste0("MicroPEM_problem_files_", Sys.Date(), ".csv"), row.names = FALSE)
# save the summary table to file
write.csv(summary_table, file = paste0("MicroPEM_summary_table_", Sys.Date(), ".csv"), row.names = FALSE)
| /Micropem_data_analysis/micropem_batch_processing_datacentre_trycatch.R | no_license | ashlinn/GRAPHS_exposure_data | R | false | false | 36,269 | r | # Working
## This script:
# Loads a current .csv spreadsheet of logsheet information from the data center.
# Finds all the upem processed files in the dropbox and matches the data file names to the logsheet file names
# Checks a few things in the files before processing. If any of the errors below are encountered, information about the file is passed to a separate file, "problem_files":
## Does the logsheet info have a matching upem datafile? (If not, need to locate the data file. File "no_match" is generated to summarize these.)
## Are the dates/times in the data file recognizable? (If not, this might mistakenly be a raw file labeled as a pre-processed file)
## Does the first hepa session end after the data begins, and does the second session begin before the data file ends?
# The script then processes the micropem files. This code is copied from the shiny app. Right now it is only saving the summary and minute averages for each file, but could easily be tweaked to save plots, etc.
# It also creates a summary table of all the files that were run, and plots histograms for them.
# If R encounters an error while processing the file, the file is skipped. Information about the skipped file is passed to "problem_files".
# All generated files are saved to your default R directory
#### LOADING REQUIRED PACKAGES AND FUNCTIONS ####
require(shiny)
require(ggplot2)
require(xts)
require(zoo)
require(plyr)
require(scales)
require(grid)
require(lubridate)
# function to put values in format xx.xx
timeformat <- function(x){
x <- sprintf("%.2f", x)
return(x)
}
# function to replace "0.00" with NA
zero2na <- function(x){
z <- gsub("\\s+", "0.00", x) # "\\s+" means match all
x[z=="0.00"] <- NA
return(x)
}
# function to convert blanks to NA
blank2na <- function(x){
z <- gsub("\\s+", "", x) #make sure it's "" and not " " etc
x[z==""] <- NA
return(x)
}
#### SETTING COMPLIANCE THRESHOLD #####
compliance_threshold <- 0.02
##### SETTING TIMEZONE #####
timezone <- "GMT"
######### PROCESSING LOGSHEET INFO -----
### FIND THE DATA FILES ###
#create a vector of upem file names
files<-list.files("~/Dropbox/Ghana_exposure_data_SHARED (1)/",recursive=T,pattern="UGF[[:alnum:]]*_[[:alnum:]]*_", full.names=T) # this grabs the preprocessed upem files rather than the raw ones, plus the logsheets
logsheets <- files[grep("logsheet", files)] # separates out the logsheets
files <- files[!(files %in% logsheets)] # removes the logsheets from the files
length(files)
# get logsheet from data centre
logsheet_data <- read.csv("~/Dropbox/Ghana project/microPEM_logsheets_through_late_feb_2014.csv", stringsAsFactors = FALSE) # replace filepath with the latest file
# assemble required data from logsheet_data
# Mstudyid, Upemid, Filterid, Fieldsetd, Thepaon1, Thepaoff1, Pickupdtd, Upemun, NComment, Thepaon2, Thepaoff2, Comments
loginfo <- logsheet_data[, c(3, 16:17, 25, 27:29, 31:32, 35:36, 41, 13)]
# format the HEPA session times as needed for xts subsetting
loginfo[, c(5:6, 10:11)] <- lapply(X = loginfo[, c(5:6, 10:11)], FUN = timeformat)
loginfo[, c(5:6, 10:11)] <- lapply(X = loginfo[, c(5:6, 10:11)], FUN = zero2na)
loginfo$hepatimes1 <- paste0(mdy_hm(paste(loginfo$Fieldsetd, loginfo$Thepaon1)), "/", mdy_hm(paste(loginfo$Fieldsetd, loginfo$Thepaoff1), tz = timezone))
loginfo$hepatimes2 <- paste0(mdy_hm(paste(loginfo$Pickupdtd, loginfo$Thepaon2)), "/", mdy_hm(paste(loginfo$Pickupdtd, loginfo$Thepaoff2), tz = timezone))
loginfo$hepatimes1[grep("NA",loginfo$hepatimes1)] <- NA
loginfo$hepatimes2[grep("NA",loginfo$hepatimes2)] <- NA
# matching files to loginfo
matched_files <- as.data.frame(files[substr(gsub("^.*KHC", "KHC", files), 1,7) %in% loginfo$Filterid])
matched_files[,2] <- substr(gsub("^.*KHC", "KHC", matched_files[,1]), 1,7)
colnames(matched_files) <- c("datafile", "Filterid")
loginfo <- merge(loginfo, matched_files, by = "Filterid", all = TRUE)
# set aside the unmatched datafiles
no_match <- loginfo[is.na(loginfo$datafile),]
no_match$problem <- "no matching datafile"
# remove the files with no match from loginfo
loginfo <- loginfo[!is.na(loginfo$datafile),]
# sort loginfo chronologically by date of pickup
loginfo <- loginfo[order(mdy(loginfo$Pickupdtd)),]
# ### Check alignment of HEPATIMES with data start/end times ###
# NOTE this step takes quite a while, as it loads each data file in separately. Could alter the "i in" statement if you are only processing a subset of files rather than the whole lot of them.
for (i in 1:nrow(loginfo)) {
filter <- loginfo$Filterid[i]
data <- read.csv(as.character(loginfo$datafile[i]),col.names = c("Date","Time", "RH.Corrected Nephelometer" ,"Temp", "RH", "Battery", "Inlet.Press", "Flow.Orifice.Press", "Flow", "X.axis", "Y.axis", "Z.axis", "Vector.Sum.Composite", "Stop.Descriptions" ), header=F, sep=",", fill=T, stringsAsFactors=FALSE) # IMPORTING ACTUAL DATASET INTO R.
data <- data[25:nrow(data),]
data.withoutdesc <- data[5:nrow(data), 1:13]
data2 = data.frame(sapply(data.withoutdesc, blank2na))
for(j in 1:11){data2[,j+2] = as.numeric(levels(data2[,j+2]))[as.integer(data2[,j+2])]}
data2$Date <- as.character(data2$Date)
data2$Time <- as.character(data2$Time)
data2$datetime <- paste(data2$Date, data2$Time)
if (!is.na(mdy(data2[1,1]))) {
loginfo$dateformat[i] <- "mdy"
data2$datetime <- mdy_hms(data2$datetime, tz = timezone)
} else if (!is.na(dmy(data2[1,1]))) {
loginfo$dateformat[i] <- "dmy"
data2$datetime <- dmy_hms(data2$datetime, tz = timezone)
loginfo$data_start[i] <- data2$datetime[1]
loginfo$data_end[i] <- data2$datetime[nrow(data2)-10]
} else loginfo$dateformat[i] <- NA
if (!is.na(loginfo$dateformat[i])) {
loginfo$data_start[i] <- data2$datetime[1]
loginfo$data_end[i] <- data2$datetime[nrow(data2)-10]
loginfo$hepa1_ok[i] <- data2$datetime[10] < mdy_hm(paste(loginfo$Fieldsetd[i], loginfo$Thepaoff1[i]), tz = timezone)
loginfo$hepa2_ok[i] <- data2$datetime[(nrow(data2)-10)] > mdy_hm(paste(loginfo$Pickupdtd[i], loginfo$Thepaon2[i]), tz = timezone)
}
}
# set aside a data frame of data files that are probably raw rather than processed files & remove these from loginfo
nodateformat <- loginfo[is.na(loginfo$dateformat),]
nodateformat$problem <- "raw file?"
loginfo <- loginfo[!is.na(loginfo$dateformat),]
# set aside a data fram eof data files that have a known hepa problem, & remove these from loginfo
hepa_problem <- loginfo[(loginfo$hepa1_ok == FALSE & !is.na(loginfo$hepa1_ok) | loginfo$hepa2_ok == FALSE & !is.na(loginfo$hepa2_ok)),]
hepa_problem$problem <- "hepa problems"
loginfo <- loginfo[!(loginfo$hepa1_ok == FALSE & !is.na(loginfo$hepa1_ok) | loginfo$hepa2_ok == FALSE & !is.na(loginfo$hepa2_ok)),]
# combine the problem files
problem_files <- rbind.fill(nodateformat, hepa_problem, no_match)
loginfo$data_start <- as.POSIXct(loginfo$data_start, origin = "1970-1-1")
loginfo$data_end <- as.POSIXct(loginfo$data_end, origin = "1970-1-1")
problem_files$data_start <- as.POSIXct(problem_files$data_start, origin = "1970-1-1")
problem_files$data_end <- as.POSIXct(problem_files$data_end, origin = "1970-1-1")
# save loginfo and problem_files to file
write.csv(loginfo, file = paste0("loginfo_",Sys.Date(),".csv"), row.names = FALSE)
write.csv(problem_files, file = paste0("problem_files_", Sys.Date()), row.names = FALSE)
# TO DO - grp the study IDs out of "files" and look for matches with problem_files
no_match <- problem_files[is.na(problem_files$datafile),]
no_match$problem <- "no matching datafile"
no_match$datafile <- as.character(no_match$datafile)
ID_matched_files <- as.data.frame(files[substr(gsub("^.*BM", "BM", files), 1,7) %in% no_match$Mstudyid], stringsAsFactors = FALSE)
ID_matched_files[,2] <- substr(gsub("^.*BM", "BM", ID_matched_files[,1]), 1,7)
colnames(ID_matched_files) <- c("datafile", "Mstudyid")
no_match$datafile[no_match$Mstudyid %in% ID_matched_files$Mstudyid] <- ID_matched_files$datafile
# 2 files found
# Print the number of files with problems
print(paste0(nrow(problem_files), " files have problems, see the file: problem_files_", Sys.Date()))
#### END LOGSHEET PROCESSING----
### PROCESS THE GOOD DATA --------
loginfo <- read.csv("~/Dropbox/Ghana project/Ghana R stuff/loginfo_2014-04-05.csv", stringsAsFactors = FALSE) # could be any log file here, make sure stringsAsFactors is set to FALSE
loginfo <- loginfo[,2:22]
fixed_files <- read.csv("~/Dropbox/Ghana project/Ghana R stuff/MicroPEM_logsheet_data_FIXED 2014-04-06.csv", stringsAsFactors = FALSE)
loginfo <- rbind.fill(loginfo, fixed_files)
not_fixed <- read.csv("~/Dropbox/Ghana project/Ghana R stuff/MicroPEM_problem_files_not_fixed 2014-04-06.csv", stringsAsFactors = FALSE)
loginfo <- rbind.fill(loginfo, not_fixed)
sum(duplicated(loginfo$Filterid))
loginfo <- loginfo[!duplicated(loginfo$Filterid),] # get rid of duplicates
### CREATING BLANK SUMMARY TABLE ###
summary_table <- data.frame(stringsAsFactors = FALSE)
for (n in 1:nrow(loginfo)) { # would be n in nrow(loginfo)
ErrorHandler <- tryCatch({
HEPATIMES <- matrix(data = NA, nrow = 2, ncol = 1, byrow = TRUE)
HEPATIMES[1,1] <- loginfo$hepatimes1[n]
HEPATIMES[2,1] <- loginfo$hepatimes2[n]
subject <- loginfo$Mstudyid[n]
session <- paste0("S_", loginfo$Vround[n])
filter <- loginfo$Filterid[n]
window_width <- 10
Sys.setenv(TZ = timezone)
data <- read.csv(as.character(loginfo$datafile[n]),col.names = c("Date","Time", "RH.Corrected Nephelometer" ,"Temp", "RH", "Battery", "Inlet.Press", "Flow.Orifice.Press", "Flow", "X.axis", "Y.axis", "Z.axis", "Vector.Sum.Composite", "Stop.Descriptions" ), header=F, sep=",", fill=T, stringsAsFactors=FALSE) # IMPORTING ACTUAL DATASET INTO R.
data_header <- data[1:22,1:7]
colnames(data_header) <- c("variable", "V1", "V2", "V3", "V4", "V5", "V6")
serialnumber <- paste("PM", as.character(sub("^......", "", data_header[4,2])), sep = "")
serialnumber_full <- data_header[4,2]
data <- data[25:nrow(data),]
stop.desc=data$Stop.Descriptions
stop.desc= cbind(data[,c(1:2)],stop.desc)
stop.desc$stop.desc = as.character(stop.desc$stop.desc)
stop.desc = data.frame(sapply(stop.desc, blank2na), stringsAsFactors = FALSE)
colnames(stop.desc) <- c("V1", "V2", "variable")
stop.desc = stop.desc[complete.cases(stop.desc),]
stop.desc <- stop.desc[,c(3,1,2)]
data.withoutdesc <- data[5:nrow(data), 1:13]
######### CREATING DATA2 DATASET - ALL DATA INCLUDING HEPA SESSIONS, RH CORRECTED. #####
data2 = data.frame(sapply(data.withoutdesc, blank2na))
for(i in 1:11){data2[,i+2] = as.numeric(levels(data2[,i+2]))[as.integer(data2[,i+2])]}
data2$Date <- as.character(data2$Date)
data2$Time <- as.character(data2$Time)
data2$datetime <- paste(data2$Date, data2$Time)
if (loginfo$dateformat[n] == "mdy") data2$datetime <- mdy_hms(data2$datetime, tz = timezone)
if (loginfo$dateformat[n] == "dmy") data2$datetime <- dmy_hms(data2$datetime, tz = timezone)
data2$RH.Corrected.Nephelometer = ifelse(data2$RH <0, NA, data2$RH.Corrected.Nephelometer) # First removes RH corrected if RH < 0
data2$RH[data2$RH < 0] = NA
data2$unique_min <- floor_date(data2$datetime, unit = "minute")
data2$unique_hour <- floor_date(data2$datetime, unit = "hour")
############# HEPA STUFF #########################
days.xts <- as.xts(data2, order.by = data2$datetime, .RECLASS = TRUE)
data2.HEPA1 <- matrix(nrow = 0, ncol = ncol(data2))
data2.HEPA2 <- matrix(nrow = 0, ncol = ncol(data2))
data2.HEPA3 <- matrix(nrow = 0, ncol = ncol(data2))
#### INDEXING HEPA INTERVALS/ REMOVING HEPA SESSIONS FROM DATA ###
NEWHEPATIMES <- HEPATIMES[!is.na(HEPATIMES)]
for (i in 1:length(NEWHEPATIMES)) {
assign(paste("data2.HEPA",i, sep = ""), days.xts[as.character(NEWHEPATIMES[i])])
}
if (length(NEWHEPATIMES) ==1) {
days.xts2 <- days.xts[!time(days.xts) %in% index(data2.HEPA1)]} else
if (length(NEWHEPATIMES) ==2) {
days.xts2 <- days.xts[!time(days.xts) %in% index(data2.HEPA1) & !time(days.xts) %in% index(data2.HEPA2)]} else
if (length(NEWHEPATIMES) ==3) {
days.xts2 <- days.xts[!time(days.xts) %in% index(data2.HEPA1) & !time(days.xts) %in% index(data2.HEPA2) & !time(days.xts) %in% index(data2.HEPA3)]
# }
} else
days.xts2 <- days.xts
#### CALCULATING AVERAGE NEPHELOMETER VALUES DURING HEPA SESSIONS (1ST AND LAST MINUTES TRIMMED TO ACCOUNT FOR POTENTIAL TIME SYNC MISMATCH) ####
HEPA1.nephelometer <- NA
HEPA2.nephelometer <- NA
HEPA3.nephelometer <- NA
if(!is.na(HEPATIMES[1,1])) {
NEWHEPATIMES <- HEPATIMES[!is.na(HEPATIMES)]
for (i in 1:length(NEWHEPATIMES)) {
if (i >=1) {
data2.HEPA1_trim = data2.HEPA1[!time(data2.HEPA1) %in% index(first(data2.HEPA1, '1 minute')) & !time(data2.HEPA1) %in% index(last(data2.HEPA1, '1 minute'))]
data2.HEPA1_trim = as.data.frame(data2.HEPA1_trim, stringsAsFactors = FALSE)
HEPA1.nephelometer = round(mean(as.numeric(data2.HEPA1_trim$RH.Corrected.Nephelometer), na.rm=TRUE), digits = 2)} else (HEPA1.nephelometer <- NA)
if( i>=2) {
data2.HEPA2_trim = data2.HEPA2[!time(data2.HEPA2) %in% index(first(data2.HEPA2, '1 minute')) & !time(data2.HEPA2) %in% index(last(data2.HEPA2, '1 minute'))]
data2.HEPA2_trim = as.data.frame(data2.HEPA2_trim, stringsAsFactors = FALSE)
HEPA2.nephelometer = round(mean(as.numeric(data2.HEPA2_trim$RH.Corrected.Nephelometer), na.rm=TRUE), digits = 2)} else (HEPA2.nephelometer <- NA)
}}
### CREATING DATASET OF HEPA SESSION INFO ####
data2.HEPA1 <- as.data.frame(data2.HEPA1, stringsAsFactors = FALSE)
data2.HEPA2 <- as.data.frame(data2.HEPA2, stringsAsFactors = FALSE)
data2.HEPA3 <- as.data.frame(data2.HEPA3, stringsAsFactors = FALSE)
hepainfo <- rbind(data2.HEPA1, data2.HEPA2, data2.HEPA3)
###### CREATING "ACTIVE DATA" DATASET (HEPA SESSIONS REMOVED) #########
active.data <- as.data.frame(days.xts2, stringsAsFactors = FALSE)
##### CALCULATING ACTIVE MINUTE AVERAGES #####
active.minute.average = ddply(active.data, .(unique_min), summarise,
RH.Corrected.Nephelometer = round(mean(as.numeric(RH.Corrected.Nephelometer), na.rm=TRUE), digits = 3),
Temp = round(mean(as.numeric(Temp), na.rm=TRUE), digits = 3),
RH = round(mean(as.numeric(RH), na.rm=TRUE), digits = 3),
Battery = round(mean(as.numeric(Battery), na.rm=TRUE), digits = 3),
Inlet.Press = round(mean(as.numeric(Inlet.Press), na.rm=TRUE), digits = 3),
Flow.Orifice.Press = round(mean(as.numeric(Flow.Orifice.Press), na.rm=TRUE), digits = 3),
Flow = round(mean(as.numeric(Flow), na.rm=TRUE), digits = 3),
X.axis_mean = round(mean(as.numeric(X.axis), na.rm=TRUE), digits = 4),
Y.axis_mean = round(mean(as.numeric(Y.axis), na.rm=TRUE), digits = 4),
Z.axis_mean = round(mean(as.numeric(Z.axis), na.rm=TRUE), digits = 4),
Vector.Sum.Composite_mean = round(mean(as.numeric(Vector.Sum.Composite), na.rm=TRUE), digits = 4),
X.axis_SD = round(sd(X.axis, na.rm=TRUE), digits = 3),
Y.axis_SD = round(sd(Y.axis, na.rm=TRUE), digits = 3),
Z.axis_SD = round(sd(Z.axis, na.rm=TRUE), digits = 3),
Vector.Sum.Composite_SD = round(sd(Vector.Sum.Composite, na.rm=TRUE), digits = 3),
unique_hour = unique_hour[1])
#### ADDING COMPLIANCE CRITERIA ###
active.minute.average$sd_composite_above_threshold = ifelse(active.minute.average$Vector.Sum.Composite_SD > compliance_threshold, 1, 0)
active.minute.average$sd_x_above_threshold = ifelse(active.minute.average$X.axis_SD > compliance_threshold, 1, 0)
active.minute.average$sd_y_above_threshold = ifelse(active.minute.average$Y.axis_SD > compliance_threshold, 1, 0)
active.minute.average$sd_z_above_threshold = ifelse(active.minute.average$Z.axis_SD > compliance_threshold, 1, 0)
active.minute.average$sd_composite_rollmean <- as.numeric(rollapply(active.minute.average$sd_composite_above_threshold, width=window_width, FUN = mean, align = "center", na.rm = TRUE, fill = NA)) ## **** NOTE **** To change the width of the rolling mean window for compliance, change the parameter for "width" w.
active.minute.average$compliance_rollmean <- ifelse(active.minute.average$sd_composite_rollmean > 0, 1, 0)
if (sum(!is.na(active.minute.average$compliance_rollmean)) > 0) {
active.minute.average.complete <- active.minute.average[complete.cases(active.minute.average),]
} else {
active.minute.average.complete <- active.minute.average
}
### SUBSETTING INTO 24 HOUR PERIODS ###
active.minute.average.complete$unique_min <- ymd_hms(active.minute.average.complete$unique_min, tz = timezone)
no.days <- ceiling(as.numeric(as.duration(active.minute.average.complete$unique_min[nrow(active.minute.average.complete)] - active.minute.average.complete$unique_min[1]))/86400) # calculates the difference in time between last and first datetime observation (in seconds), transforms it into days and returns the ceiling of days
dayindex <- active.minute.average.complete$unique_min[1] + hours(seq(from = 24, to = no.days*24, by = 24))
active.minute.average.complete$unique_24h <- 1
for (i in 1:no.days) {
active.minute.average.complete$unique_24h <- ifelse ((active.minute.average.complete$unique_min > dayindex[i]), i+1, active.minute.average.complete$unique_24h)
}
#### CALCULATING HOUR AVERAGES ####
active.hour.average = ddply(active.minute.average.complete, .(unique_hour), summarise,
RH.Corrected.Nephelometer = round(mean(RH.Corrected.Nephelometer, na.rm=TRUE), digits = 3),
Temp = round(mean(Temp, na.rm=TRUE), digits = 3),
RH = round(mean(RH, na.rm=TRUE), digits = 3),
Battery = round(mean(Battery, na.rm=TRUE), digits = 3),
Inlet.Press = round(mean(Inlet.Press, na.rm=TRUE), digits = 3),
Flow.Orifice.Press = round(mean(Flow.Orifice.Press, na.rm=TRUE), digits = 3),
Flow = round(mean(Flow, na.rm=TRUE), digits = 3),
count_composite_above_threshold = sum(sd_composite_above_threshold, na.rm=TRUE),
percent_composite_above_threshold = round(mean(sd_composite_above_threshold, na.rm=TRUE), digits = 3),
x_above_threshold = sum(sd_x_above_threshold, na.rm=TRUE),
x_percent_above_threshold = round(mean(sd_x_above_threshold, na.rm=TRUE), digits = 3),
y_above_threshold = sum(sd_y_above_threshold, na.rm=TRUE),
y_percent_above_threshold = round(mean(sd_y_above_threshold, na.rm=TRUE), digits = 3),
z_above_threshold = sum(sd_z_above_threshold, na.rm=TRUE),
z_percent_above_threshold = round(mean(sd_z_above_threshold, na.rm=TRUE), digits = 3),
total_minutes_observation = length(unique_min),
proportion_compliance_rollmean = round(sum(compliance_rollmean, na.rm = TRUE)/60, digits = 3),
datetime = unique_min[1],
unique_24h = unique_24h[1])
###### CALCULATING 24-HOUR AVERAGES #####
active.day.average = ddply(active.minute.average.complete, .(unique_24h), summarise,
RH.Corrected.Nephelometer = mean(RH.Corrected.Nephelometer, na.rm=TRUE),
Temp = mean(Temp, na.rm=TRUE),
RH = mean(RH, na.rm=TRUE),
Battery = mean(Battery, na.rm=TRUE),
Inlet.Press = mean(Inlet.Press, na.rm=TRUE),
Flow.Orifice.Press = mean(Flow.Orifice.Press, na.rm=TRUE),
Flow = mean(Flow, na.rm=TRUE),
count_composite_above_threshold = sum(sd_composite_above_threshold, na.rm=TRUE),
percent_composite_above_threshold = mean(sd_composite_above_threshold, na.rm=TRUE),
x_above_threshold = sum(sd_x_above_threshold, na.rm=TRUE),
x_percent_above_threshold = mean(sd_x_above_threshold, na.rm=TRUE),
y_above_threshold = sum(sd_y_above_threshold, na.rm=TRUE),
y_percent_above_threshold = mean(sd_y_above_threshold, na.rm=TRUE),
z_above_threshold = sum(sd_z_above_threshold, na.rm=TRUE),
z_percent_above_threshold = mean(sd_z_above_threshold, na.rm=TRUE),
hours_compliance_rollmean = round(sum((compliance_rollmean)/60, na.rm = TRUE), digits = 2),
total_minutes_observation = length(unique_min),
total_hours_observation = round(length(unique_min)/60, digits = 1),
datetime = unique_min[1])
####### MINUTE AVERAGE DATA ############################
active.minute.average.complete$Subject <- rep(subject)
active.minute.average.complete$Session <- rep(session)
# active.minute.average.complete <- active.minute.average.complete[,c(25:26, 1:24)]
####### HOUR AVERAGE DATA ############################
if (sum(!is.na(active.hour.average$x_percent_above_threshold))> 0) {
active.hour.average.complete <- active.hour.average[complete.cases(active.hour.average),]
} else {
active.hour.average.complete <- active.hour.average
}
active.hour.average.complete$Subject <- rep(subject)
active.hour.average.complete$Session <- rep(session)
##### ADDING HEPA CORRECTION TO NEPHELOMETER READINGS ####
## NOTE: CURRENTLY ONLY SET UP FOR ONE OR TWO HEPA SESSIONS ###
active.day.average$HEPA_corr_neph <- NA
active.minute.average.complete$HEPA_corr_neph <- NA
active.hour.average.complete$HEPA_corr_neph <- NA
# new
if (length(NEWHEPATIMES) ==1) {
HEPA_correction <- round(HEPA1.nephelometer, digits = 2)
active.day.average$HEPA_correction <- HEPA_correction
active.hour.average.complete$HEPA_correction <- HEPA_correction
active.minute.average.complete$HEPA_correction <- HEPA_correction
active.day.average$HEPA_corr_neph <- round(active.day.average$RH.Corrected.Nephelometer - active.day.average$HEPA_correction, digits = 2)
active.hour.average.complete$HEPA_corr_neph <- round(active.hour.average.complete$RH.Corrected.Nephelometer - active.hour.average.complete$HEPA_correction, digits = 3)
active.minute.average.complete$HEPA_corr_neph <- round(active.minute.average.complete$RH.Corrected.Nephelometer - active.minute.average.complete$HEPA_correction, digits = 3)
} else
# end new
if (length(NEWHEPATIMES) ==2) {
HEPA_correction <- seq(HEPA1.nephelometer, HEPA2.nephelometer, length.out= nrow(active.day.average)) # length = number of days sampled
active.day.average$HEPA_correction <- round(HEPA_correction, digits = 2)
for (i in 1:nrow(active.day.average)) {
active.day.average$HEPA_corr_neph[i] <- round(active.day.average$RH.Corrected.Nephelometer[i] - HEPA_correction[i], digits = 2) # why not just subtract the vectors?
active.minute.average.complete$HEPA_correction[active.minute.average.complete$unique_24h ==i] <- round(HEPA_correction[i], digits = 2) # sets up one HEPA correction value per 24 hours
active.minute.average.complete$HEPA_corr_neph <- round(active.minute.average.complete$RH.Corrected.Nephelometer - active.minute.average.complete$HEPA_correction, digits = 3)
active.hour.average.complete$HEPA_correction[active.hour.average.complete$unique_24h==i] <- round(HEPA_correction[i], digits = 2)
active.hour.average.complete$HEPA_corr_neph <- round(active.hour.average.complete$RH.Corrected.Nephelometer - active.hour.average.complete$HEPA_correction, digits = 3)
}} else
{ active.day.average$HEPA_correction <- NA
active.minute.average.complete$HEPA_correction <- NA
active.hour.average.complete$HEPA_correction <- NA
}
### NEW ###
### CALCULATING DELTA PRESSURE ###
if(!is.na(active.minute.average.complete$HEPA_corr_neph[1])) {
dep_rate <- active.minute.average.complete$HEPA_corr_neph * active.minute.average.complete$Flow * 1/1000 } else
{ dep_rate <- active.minute.average.complete$RH.Corrected.Nephelometer * active.minute.average.complete$Flow * 1/1000 }
active.minute.average.complete$cumulative_dep <- cumsum(dep_rate)
active.hour.average.complete$cumulative_dep <- round(tapply(X = active.minute.average.complete$cumulative_dep, INDEX = active.minute.average.complete$unique_hour, FUN = mean), digits = 3)
active.day.average$cumulative_dep <- round(tapply(X = active.minute.average.complete$cumulative_dep, INDEX = active.minute.average.complete$unique_24h, FUN = mean), digits = 2)
active.minute.average.complete$delta_pressure <- round(active.minute.average.complete$Inlet.Press - active.minute.average.complete$Inlet.Press[1], digits = 3)*2
# note - multiplying the Inlet Pressure x2 as per RTI
active.hour.average.complete$delta_pressure <- round(tapply(X = active.minute.average.complete$delta_pressure, INDEX = active.minute.average.complete$unique_hour, FUN = mean), digits = 3)
active.day.average$delta_pressure <- round(tapply(X = active.minute.average.complete$delta_pressure, INDEX = active.minute.average.complete$unique_24h, FUN = mean), digits = 2)
active.day.average$unique_24h <- paste("Day", active.day.average$unique_24h)
active.day.average$proportion_compliance_all <- NA
active.day.average$proportion_compliance_all <- ifelse(active.day.average$total_hours_observation ==0, NA, round(active.day.average$hours_compliance_rollmean/active.day.average$total_hours_observation, digits = 3))
active.day.average$proportion_compliance_all <- ifelse(is.na(active.day.average$percent_composite_above_threshold), NA, active.day.average$proportion_compliance_all)
################### SUMMARY DATA ###########################
##### TOTAL RUN TIME ####################
total_time <- sum(active.day.average$total_hours_observation)
total_time_minutes <- sum(active.day.average$total_minutes_observation)
total_minutes_worn <- sum(active.minute.average.complete$compliance_rollmean, na.rm = TRUE)
### START DATE & TIME ###
start_time = format(active.minute.average.complete$unique_min[1], format = "%d%b%y %H:%M:%S")
### STOP DATE & TIME ###
stop_time = format(active.minute.average.complete$unique_min[nrow(active.minute.average.complete)], format = "%d%b%y %H:%M:%S") # due to NAs at end of rolling mean for compliance, last minutes will be truncated
#### GENERATING ID FOR FILENAMES #####
lastdate <- substr(stop_time,1,7)
ID <- paste(subject, lastdate, session, serialnumber, filter, sep = "_")
#### AVERAGE ACTIVE SAMPLE NEPHELOMETER ####
average_sample_nephelometer = round(mean(as.numeric(active.data$RH.Corrected.Nephelometer), na.rm=TRUE), digits = 2)
average_sample_nephelometer_hepacorr <- round(mean(active.day.average$HEPA_corr_neph, na.rm= TRUE), digits = 2)
###### VOLTAGE DROP PER HOUR #####
# (Vb-Ve)*1000 mV/V ÷ (hours ran/2) (adjust for 50% duty cycle)- this number should be < 30 mV/hr for best pumps
voltage_b <- active.hour.average.complete$Battery[1]
voltage_e <- active.hour.average.complete$Battery[length(active.hour.average.complete)]
voltage_drop <- (voltage_b-voltage_e)*1000 / (total_time/2)
#### DATA SUMMARY #######
# active.data_summary = matrix(c(as.character(filter),
# serialnumber_full,
# round(total_time),
# round(total_time_minutes),
# as.character(start_time),
# as.character(stop_time),
# timezone,
# round(average_sample_nephelometer, digits = 2),
# average_sample_nephelometer_hepacorr,
# HEPATIMES,
# round(HEPA1.nephelometer, digits = 2),
# round(HEPA2.nephelometer, digits = 2),
# compliance_threshold,
# sum(active.minute.average$sd_composite_above_threshold, na.rm=TRUE),
# round(total_minutes_worn/60),
# round(mean(active.day.average$proportion_compliance_all, na.rm = TRUE)*100, digits =1),
# round(voltage_drop, digits = 2)),
# ncol = 1)
#
#
# active.data_summary = data.frame(active.data_summary, stringsAsFactors = FALSE)
# active.data_summary$variable = c("Filter", "Serialnumber", "Total Sampling Time (hrs)", "Total Sampling Time (mins)", "Start Time", "Stop Time", "Timezone", "Mean Active Nephelometer (ug/m^3)", "Mean Active Neph, HEPA corr (ug/m^3)", "HEPA1_times (start/stop)", "HEPA2_times (start/stop)",
# "Mean HEPA1 Nephelometer (ug/m^3)", "Mean HEPA2 Nephelometer (ug/m^3)", "Compliance Threshold for minutewise SD",
# "Total Time Composite SD>Threshold (mins)", "Total Hours Worn (hrs)", "Percent of Hours Worn (%)", "Avg Voltage Drop per Hour (mV/hr)")
# colnames(active.data_summary)[1] <- "V1"
active.data_summary2 = data.frame("Filter" = as.character(filter),
"Serialnumber" = serialnumber_full,
"Total Sampling Time.hrs" = round(total_time),
"Total Sampling Time.mins" = round(total_time_minutes),
"Start Time" = as.character(start_time),
"Stop Time" = as.character(stop_time),
"Timezone" = timezone,
"Mean Active Nephelometer.ug/m^3" = round(average_sample_nephelometer, digits = 2),
"Mean Active Neph HEPA corr.ug/m^3" = average_sample_nephelometer_hepacorr,
"HEPA1_times.start/stop" = HEPATIMES[1,1],
"HEPA2_times.start/stop" = HEPATIMES[2,1],
"Mean HEPA1 Nephelometer.ug/m^3" = round(HEPA1.nephelometer, digits = 2),
"Mean HEPA2 Nephelometer.ug/m^3" = round(HEPA2.nephelometer, digits = 2),
"Compliance Threshold for minutewise SD" = compliance_threshold,
"Total Time Composite SD over Threshold.mins" = sum(active.minute.average$sd_composite_above_threshold, na.rm=TRUE),
"Total Hours Worn.hrs" = round(total_minutes_worn/60),
"Percent of Hours Worn" = round(mean(active.day.average$proportion_compliance_all, na.rm = TRUE)*100, digits =1),
"Avg Voltage Drop.mV/hr" = round(voltage_drop, digits = 2))
active.data_summary <- data.frame("variable" = colnames(active.data_summary2), "V1" = t(active.data_summary2))
summary_24 <- as.matrix(t(active.day.average))
summary_24[2:18,] <- round(as.numeric(summary_24[2:18,]), digits = 2)
summary_24 <- as.data.frame(summary_24, stringsAsFactors = FALSE)
summary_24$variable <- as.character(colnames(active.day.average))
summary_24 <- summary_24[c(1,20,2, 22,21,3:19,23),]
summary_24$variable <- c("Unique 24-hour period", "24-hour period start date", "RH-Corrected Nephelometer (ug/m^3)", "HEPA Correction (ug/m^3)", "HEPA-Corrected Nephelometer (ug/m^3)", "Temp (C)", "RH (%)", "Battery (V)",
"Inlet.Pressure (H20)",
"Flow.Orifice.Pressure (H20)",
"Flow (Lpm)",
"count_composite_above_threshold (mins)",
"percent_composite_above_threshold (%)",
"x_above_threshold (mins)",
"x_percent_above_threshold (%)",
"y_above_threshold(mins)",
"y_percent_above_threshold (%)",
"z_above_threshold (mins)",
"z_percent_above_threshold (%)",
"Hours of Compliance (hrs, by rolling mean)",
"Active Sampling Minutes (mins)",
"Active Sampling Hours (hrs)",
"Percent of Hours Worn (%)")
labels_setup <- as.data.frame(t(rep("**SETUP**", 7)), stringsAsFactors = FALSE)
colnames(labels_setup)[7] <- "variable"
labels_summary <- as.data.frame(t(rep("**OVERALL.SUMMARY**", 7)), stringsAsFactors = FALSE)
colnames(labels_summary)[7] <- "variable"
labels_desc <- as.data.frame(t(rep("**EQUIPMENT.LOG**", 7)), stringsAsFactors = FALSE)
colnames(labels_desc)[7] <- "variable"
labels_24 <- as.data.frame(t(rep("**24.HOUR.SUMMARY**",7)), stringsAsFactors = FALSE)
colnames(labels_24)[7] <- "variable"
summary <- rbind.fill( labels_summary[,c(7,1:6)], active.data_summary, labels_24,summary_24, labels_setup, data_header,labels_desc, stop.desc)
# summary$Permanent_ID <- rep(permID)
summary$Subject <- rep(subject)
summary$Session <- rep(session)
summary <- summary[,c(8,9, 1:7)]
},
error = function(e) e
)
if(inherits(ErrorHandler, "error")) {
print(paste0("Not processed due to errors: ", filter))
next }
# save the summary
write.csv(summary, file = paste0(ID,"_MicroPEM_Summary.csv"))
# save the minute data
write.csv(active.minute.average.complete, file = paste0(ID, "_Data_Minute_Averages.csv"), row.names = F)
# add by-day compliance numbers to the summary
by_day_compliance <- as.data.frame(summary_24[20,1:(length(summary_24[20,]) - 1)])
for (i in 1:ncol(by_day_compliance)) {
colnames(by_day_compliance)[i] <- paste0("Day", i, "Compliance.hrs")
}
summary_table_n <- cbind(active.data_summary2, by_day_compliance)
summary_table <- rbind.fill(summary_table, summary_table_n)
}
# Apply human-readable column names to the per-filter summary table built up
# across the processing loop above.
# NOTE(review): this assumes exactly three by-day compliance columns
# (Day1-Day3) were appended for every filter; a deployment spanning a
# different number of days would make the name vector's length mismatch the
# table and this assignment would fail -- confirm against the study design.
colnames(summary_table) <- c("Filter", "Serialnumber", "Total Sampling Time (hrs)", "Total Sampling Time (mins)", "Start Time", "Stop Time", "Timezone", "Mean Active Nephelometer (ug/m^3)", "Mean Active Neph, HEPA corr (ug/m^3)", "HEPA1_times (start/stop)", "HEPA2_times (start/stop)", "Mean HEPA1 Nephelometer (ug/m^3)", "Mean HEPA2 Nephelometer (ug/m^3)", "Compliance Threshold for minutewise SD",
"Total Time Composite SD>Threshold (mins)", "Total Hours Worn (hrs)", "Percent of Hours Worn (%)", "Avg Voltage Drop per Hour (mV/hr)", "Day1Compliance.hrs", "Day2Compliance.hrs", "Day3Compliance.hrs")
# Any filter ID present in the run log but absent from the summary table was
# never successfully processed; record those rows as problem files.
not_processed <- loginfo[!loginfo$Filterid %in% summary_table$Filter,]
not_processed$problem <- "not processed"
problem_files <- rbind(problem_files, not_processed)
### DO THESE PARTS ONLY WHEN YOU WANT TO SAVE THE TABLES, give appropriate filenames ----
# save the problem files (date-stamped CSV in the working directory)
write.csv(problem_files, file = paste0("MicroPEM_problem_files_", Sys.Date(), ".csv"), row.names = FALSE)
# save the summary table to file (date-stamped CSV in the working directory)
write.csv(summary_table, file = paste0("MicroPEM_summary_table_", Sys.Date(), ".csv"), row.names = FALSE)
|
# week 3 - ggplot2
library(tidyverse)

data("mpg")
mpg <- as_tibble(mpg)

# Min-max rescale a numeric vector onto [0, 1].
# Assumes `v` contains no NAs and is not constant (max > min), which holds
# for mpg$hwy and mpg$displ.
rescale01 <- function(v) {
  (v - min(v)) / (max(v) - min(v))
}

hwy_rescaled <- rescale01(mpg$hwy)     # Rescaling hwy into [0,1]
displ_rescaled <- rescale01(mpg$displ) # Same for displ

# Map point size to the product of the two rescaled variables.
ggplot(data = mpg) +
  geom_point(mapping = aes(x = displ, y = hwy, size = displ_rescaled * hwy_rescaled))
ggplot(data = mpg) +
  geom_point(mapping = aes(x = displ, y = hwy, size = displ_rescaled * hwy_rescaled, alpha = cyl))

# Colour / shape mapped to a discrete variable (class).
ggplot(data = mpg) + geom_point(mapping = aes(x = displ, y = hwy, colour = class), size = 4)
ggplot(data = mpg) + geom_point(mapping = aes(x = displ, y = hwy, shape = class), size = 4)

# Jitter the points to reduce overplotting.
ggplot(data = mpg) +
  geom_point(mapping = aes(x = displ, y = hwy, size = displ_rescaled * hwy_rescaled, alpha = cyl),
             position = "jitter")

# Text labels instead of points.
ggplot(data = mpg) + geom_text(mapping = aes(x = displ, y = hwy, label = class, colour = cyl))
ggplot(data = mpg) + geom_text(mapping = aes(x = displ, y = hwy, label = cyl, colour = class))

MyGraph <- ggplot(data = mpg) +
  geom_point(mapping = aes(x = displ, y = hwy), colour = "red", shape = 15)
MyGraph + facet_grid(class ~ drv)

# Smoothing
MyGraph + geom_smooth(mapping = aes(x = displ, y = hwy))
# Force linearity
MyGraph + geom_smooth(mapping = aes(x = displ, y = hwy), method = "lm", se = FALSE)
# Model for different groups
MyGraph + geom_smooth(mapping = aes(x = displ, y = hwy, group = drv), method = "lm", se = FALSE)
MyGraph + geom_smooth(mapping = aes(x = displ, y = hwy, colour = drv), method = "lm", se = FALSE)
ggplot(data = mpg) +
  geom_point(mapping = aes(x = displ, y = hwy, colour = drv), shape = 15, size = 3) +
  geom_smooth(mapping = aes(x = displ, y = hwy, colour = drv), method = "lm", se = FALSE)

# better to define aes for all up front - save repetition
ggplot(data = mpg, mapping = aes(x = displ, y = hwy, colour = drv)) +
  geom_point(shape = 15, size = 3) +
  geom_smooth(method = "lm", se = FALSE)
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
  geom_point(shape = 15, size = 3) +
  geom_smooth(method = "lm", se = FALSE) +
  aes(colour = drv)

# can set globally (in first call to ggplot) or locally -
# below, data is called locally in geom_point
ggplot() + geom_point(data = mpg, mapping = aes(x = displ, y = hwy))

library(mdsr)
data("CIACountries")
CIACountries <- as_tibble(CIACountries)

ggplot(data = CIACountries) + geom_bar(mapping = aes(x = net_users))

CIACountries_Sample <- sample_n(CIACountries, size = 25)
ordered_countries <- reorder(CIACountries_Sample$country, CIACountries_Sample$pop)
# we set the argument stat to "identity" (its default value in geom_bar is
# stat = "count") to force ggplot2 to use the y aesthetic, which we mapped to
# variable pop (population); note also use of coord_flip()
G <- ggplot(data = CIACountries_Sample) +
  geom_bar(mapping = aes(x = ordered_countries, y = pop), stat = "identity") +
  coord_flip()
G

# %+%: by using this operator, we keep all the visual properties previously
# configured for the object, but we change the dataset to which those
# properties are applied
CIACountries_Sample <- sample_n(CIACountries, size = 25) # Another Sample
ordered_countries <- reorder(CIACountries_Sample$country, CIACountries_Sample$pop)
G <- G %+% CIACountries_Sample # Update the data mapped to graph G
G

# Density plots
ggplot(data = CIACountries, mapping = aes(pop)) + geom_density() + scale_x_log10()
# adjust the bandwidth
ggplot(data = CIACountries, mapping = aes(pop)) + geom_density(adjust = 2) + scale_x_log10()
ggplot(data = CIACountries, mapping = aes(pop)) + geom_density(adjust = 0.2) + scale_x_log10()

# Boxplots of carat against each categorical variable in diamonds.
ggplot(data = diamonds, mapping = aes(x = color, y = carat)) + geom_boxplot()
ggplot(data = diamonds, mapping = aes(x = clarity, y = carat)) + geom_boxplot()
ggplot(data = diamonds, mapping = aes(x = cut, y = carat)) + geom_boxplot()

# topic 1 discussion - mpg data
MyGraph + facet_wrap(c("trans"), ncol = 1)
MyGraph + facet_grid(trans ~ .)
# named cty_plot (not `plot`) so base::plot is not shadowed
cty_plot <- ggplot(data = mpg, aes(x = hwy, y = cty)) + geom_point()
cty_plot + facet_wrap(c("class"))
cty_plot + facet_grid(class ~ .)

# topic 3 exercise - diamonds data
ggplot(data = diamonds, aes(x = clarity, y = carat)) + geom_boxplot()
ggplot(data = diamonds, mapping = aes(x = cut, y = carat)) + geom_boxplot()
ggplot(data = diamonds, mapping = aes(x = clarity, y = carat)) +
  geom_boxplot(outlier.color = "red", outlier.shape = 3) +
  geom_jitter(width = 0.1, alpha = 0.05, color = "blue")
| /wk3-ggplot2.R | no_license | Josh-Myers/JCU-Foundations-for-Data-Science | R | false | false | 4,285 | r | # week 3 - ggplot2
library(tidyverse)
data("mpg")
mpg = as_tibble(mpg)
hwy_rescaled = (mpg$hwy-min(mpg$hwy))/(max(mpg$hwy)-min(mpg$hwy)) # Rescaling hwy into [0,1]
displ_rescaled = (mpg$displ-min(mpg$displ))/(max(mpg$displ)-min(mpg$displ)) # Same for displ
ggplot(data=mpg) + geom_point(mapping=aes(x=displ, y=hwy, size=displ_rescaled*hwy_rescaled))
ggplot(data = mpg) +
geom_point(mapping=aes(x=displ, y=hwy, size=displ_rescaled*hwy_rescaled, alpha=cyl))
ggplot(data = mpg) + geom_point(mapping=aes(x=displ, y=hwy, colour = class), size = 4)
ggplot(data = mpg) + geom_point(mapping=aes(x=displ, y=hwy, shape = class), size = 4)
ggplot(data = mpg) +
geom_point(mapping = aes(x=displ, y=hwy, size=displ_rescaled*hwy_rescaled, alpha=cyl), position="jitter")
ggplot(data = mpg) + geom_text(mapping=aes(x=displ, y=hwy, label = class, colour = cyl))
ggplot(data = mpg) + geom_text(mapping=aes(x=displ, y=hwy, label = cyl, colour = class))
MyGraph <- ggplot(data=mpg) + geom_point(mapping=aes(x=displ,y=hwy), colour="red", shape=15)
MyGraph + facet_grid(class ~ drv)
#Smoothing
MyGraph + geom_smooth(mapping = aes(x=displ,y=hwy))
#Force linearity
MyGraph + geom_smooth(mapping = aes(x=displ,y=hwy), method = "lm", se = FALSE)
#Model for different groups
MyGraph + geom_smooth(mapping = aes(x=displ, y=hwy, group = drv), method = "lm", se = FALSE)
MyGraph + geom_smooth(mapping = aes(x=displ, y=hwy, colour = drv), method="lm", se=FALSE)
ggplot(data=mpg) + geom_point(mapping=aes(x=displ, y=hwy, colour=drv), shape=15, size=3) + geom_smooth(mapping=aes(x=displ, y=hwy, colour=drv), method="lm", se=FALSE)
# better to define aes for all up front - save repetition
ggplot(data=mpg, mapping=aes(x=displ, y=hwy, colour=drv)) + geom_point(shape=15, size=3) + geom_smooth(method="lm", se=FALSE)
ggplot(data=mpg, mapping=aes(x=displ, y=hwy)) + geom_point(shape=15, size=3) +
geom_smooth(method="lm", se=FALSE) + aes(colour = drv)
# can set globally (in first call to ggplot) or locally - below data is called locally in geom_point
ggplot() + geom_point(data = mpg, mapping=aes(x=displ, y=hwy))
library(mdsr)
data("CIACountries")
CIACountries = as_tibble(CIACountries)
ggplot(data = CIACountries) + geom_bar(mapping = aes(x = net_users))
CIACountries_Sample <- sample_n(CIACountries, size = 25)
ordered_countries <- reorder(CIACountries_Sample$country, CIACountries_Sample$pop)
# we set the argument stat to “identity” (its default value in geom_bar is stat = “count”) to force ggplot2 to use the y aesthetic,
# which we mapped to variable pop (population);
# note also use of coord_flip()
G <- ggplot(data = CIACountries_Sample) +
geom_bar(mapping = aes(x = ordered_countries, y = pop), stat = "identity") + coord_flip()
G
# %+%. By using this operator, we keep all the visual properties previously configured for the object,
# but we change the dataset to which those properties are applied
CIACountries_Sample <- sample_n(CIACountries, size=25) # Another Sample
ordered_countries <- reorder(CIACountries_Sample$country,CIACountries_Sample$pop)
G <- G %+% CIACountries_Sample # Update the data mapped to graph G
G
# Density plots
ggplot(data = CIACountries, mapping = aes(pop)) + geom_density() + scale_x_log10()
# adjust the bandwidth
ggplot(data = CIACountries, mapping = aes(pop)) + geom_density(adjust = 2) + scale_x_log10()
ggplot(data = CIACountries, mapping = aes(pop)) + geom_density(adjust = 0.2) + scale_x_log10()
ggplot(data = diamonds, mapping = aes(x = color, y = carat)) + geom_boxplot()
ggplot(data = diamonds, mapping = aes(x = clarity, y = carat)) + geom_boxplot()
ggplot(data = diamonds, mapping = aes(x = cut, y = carat)) + geom_boxplot()
# topic 1 discussion - mpg data
MyGraph + facet_wrap(c("trans"), ncol = 1)
MyGraph + facet_grid(trans ~ .)
plot = ggplot(data = mpg, aes(x=hwy, y=cty)) + geom_point()
plot + facet_wrap(c('class'))
plot + facet_grid(class ~ .)
# topic 3 exercise - diamonds data
ggplot(data = diamonds, aes(x=clarity, y=carat)) + geom_boxplot()
ggplot(data = diamonds, mapping = aes(x = cut, y = carat)) + geom_boxplot()
ggplot(data = diamonds, mapping = aes(x = clarity, y = carat)) +
geom_boxplot(outlier.color = "red", outlier.shape = 3) +
geom_jitter(width = 0.1, alpha = 0.05, color = "blue")
|
# Auto-generated fuzzer reproduction case: a degenerate argument list with
# empty `id` and `y` components and an `x` vector of extreme / near-denormal
# doubles, i.e. the three arguments have mismatched lengths (0, 31, 0).
testlist <- list(id = integer(0), x = c(-1.14111380724432e+306, 2.6099719324267e-312, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
# Feed the fuzzed arguments to the internal ggforce compiled routine; the
# harness is checking for crashes / memory errors, not for a return value.
result <- do.call(ggforce:::enclose_points,testlist)
str(result) | /ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1609955869-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 256 | r | testlist <- list(id = integer(0), x = c(-1.14111380724432e+306, 2.6099719324267e-312, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(ggforce:::enclose_points,testlist)
str(result) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.