content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Unit tests (testthat 3e) for gargle's verbosity option and cli-based
# messaging helpers: gargle_verbosity(), local_gargle_verbosity(),
# gargle_info(), gargle_debug(), and bulletize() -- all defined in the package.
# NOTE(review): expect_snapshot() output is keyed to the test descriptions
# below; renaming a test (e.g. fixing the "accomodates" typo) would orphan
# its stored snapshot.
test_that("gargle_verbosity() defaults to 'info'", {
  # Unset both the current option and the legacy one so the built-in
  # default applies; withr restores the options when the test exits.
  withr::local_options(list(
    gargle_verbosity = NULL,
    gargle_quiet = NULL
  ))
  expect_equal(gargle_verbosity(), "info")
})
test_that("gargle_verbosity() validates the value it finds", {
  # A non-character option value should produce an informative error,
  # captured as a snapshot.
  withr::local_options(list(gargle_verbosity = TRUE))
  expect_snapshot_error(gargle_verbosity())
})
test_that("gargle_verbosity() accomodates people using the old option", {
  # Legacy `gargle_quiet = FALSE` should map to "debug"; the snapshot
  # captures any deprecation messaging emitted during the lookup.
  withr::local_options(list(
    gargle_verbosity = NULL,
    gargle_quiet = FALSE
  ))
  expect_snapshot(
    out <- gargle_verbosity()
  )
  expect_equal(out, "debug")
})
test_that("gargle_info() works", {
  blah <- "BLAH"
  # Info messages should be emitted at "debug" and "info" levels...
  local_gargle_verbosity("debug")
  expect_snapshot(gargle_info(c("aa {.field {blah}} bb", "cc {.emph xyz} dd")))
  local_gargle_verbosity("info")
  expect_snapshot(gargle_info(c("ee {.field {blah}} ff", "gg {.emph xyz} hh")))
  # ...and suppressed entirely at "silent" (empty snapshot).
  local_gargle_verbosity("silent")
  expect_snapshot(gargle_info(c("ii {.field {blah}} jj", "kk {.emph xyz} ll")))
})
test_that("gargle_debug() works", {
  foo <- "FOO"
  # Debug messages appear only at the "debug" level...
  local_gargle_verbosity("debug")
  expect_snapshot(gargle_debug(c("11 {.field {foo}} 22", "33 {.file a/b/c} 44")))
  # ...and are suppressed at "info" and "silent".
  local_gargle_verbosity("info")
  expect_snapshot(gargle_debug(c("55 {.field {foo}} 66", "77 {.file a/b/c} 88")))
  local_gargle_verbosity("silent")
  expect_snapshot(gargle_debug(c("99 {.field {foo}} 00", "11 {.file a/b/c} 22")))
})
test_that("bulletize() works", {
  # Default formatting, a custom bullet, and explicit truncation via n_show.
  expect_snapshot(cli::cli_bullets(bulletize(letters)))
  expect_snapshot(cli::cli_bullets(bulletize(letters, bullet = "x")))
  expect_snapshot(cli::cli_bullets(bulletize(letters, n_show = 2)))
  # Boundary cases around the truncation cutoff: n_fudge presumably controls
  # how many extra items may be shown before switching to a "... and n more"
  # summary -- TODO confirm against bulletize()'s definition.
  expect_snapshot(cli::cli_bullets(bulletize(letters[1:6])))
  expect_snapshot(cli::cli_bullets(bulletize(letters[1:7])))
  expect_snapshot(cli::cli_bullets(bulletize(letters[1:8])))
  expect_snapshot(cli::cli_bullets(bulletize(letters[1:6], n_fudge = 0)))
  expect_snapshot(cli::cli_bullets(bulletize(letters[1:8], n_fudge = 3)))
})
|
/tests/testthat/test-utils-ui.R
|
permissive
|
muschellij2/gargle
|
R
| false
| false
| 1,968
|
r
|
# Unit tests (testthat 3e) for gargle's verbosity option and cli-based
# messaging helpers: gargle_verbosity(), local_gargle_verbosity(),
# gargle_info(), gargle_debug(), and bulletize() -- all defined in the package.
# NOTE(review): expect_snapshot() output is keyed to the test descriptions
# below; renaming a test (e.g. fixing the "accomodates" typo) would orphan
# its stored snapshot.
test_that("gargle_verbosity() defaults to 'info'", {
  # Unset both the current option and the legacy one so the built-in
  # default applies; withr restores the options when the test exits.
  withr::local_options(list(
    gargle_verbosity = NULL,
    gargle_quiet = NULL
  ))
  expect_equal(gargle_verbosity(), "info")
})
test_that("gargle_verbosity() validates the value it finds", {
  # A non-character option value should produce an informative error,
  # captured as a snapshot.
  withr::local_options(list(gargle_verbosity = TRUE))
  expect_snapshot_error(gargle_verbosity())
})
test_that("gargle_verbosity() accomodates people using the old option", {
  # Legacy `gargle_quiet = FALSE` should map to "debug"; the snapshot
  # captures any deprecation messaging emitted during the lookup.
  withr::local_options(list(
    gargle_verbosity = NULL,
    gargle_quiet = FALSE
  ))
  expect_snapshot(
    out <- gargle_verbosity()
  )
  expect_equal(out, "debug")
})
test_that("gargle_info() works", {
  blah <- "BLAH"
  # Info messages should be emitted at "debug" and "info" levels...
  local_gargle_verbosity("debug")
  expect_snapshot(gargle_info(c("aa {.field {blah}} bb", "cc {.emph xyz} dd")))
  local_gargle_verbosity("info")
  expect_snapshot(gargle_info(c("ee {.field {blah}} ff", "gg {.emph xyz} hh")))
  # ...and suppressed entirely at "silent" (empty snapshot).
  local_gargle_verbosity("silent")
  expect_snapshot(gargle_info(c("ii {.field {blah}} jj", "kk {.emph xyz} ll")))
})
test_that("gargle_debug() works", {
  foo <- "FOO"
  # Debug messages appear only at the "debug" level...
  local_gargle_verbosity("debug")
  expect_snapshot(gargle_debug(c("11 {.field {foo}} 22", "33 {.file a/b/c} 44")))
  # ...and are suppressed at "info" and "silent".
  local_gargle_verbosity("info")
  expect_snapshot(gargle_debug(c("55 {.field {foo}} 66", "77 {.file a/b/c} 88")))
  local_gargle_verbosity("silent")
  expect_snapshot(gargle_debug(c("99 {.field {foo}} 00", "11 {.file a/b/c} 22")))
})
test_that("bulletize() works", {
  # Default formatting, a custom bullet, and explicit truncation via n_show.
  expect_snapshot(cli::cli_bullets(bulletize(letters)))
  expect_snapshot(cli::cli_bullets(bulletize(letters, bullet = "x")))
  expect_snapshot(cli::cli_bullets(bulletize(letters, n_show = 2)))
  # Boundary cases around the truncation cutoff: n_fudge presumably controls
  # how many extra items may be shown before switching to a "... and n more"
  # summary -- TODO confirm against bulletize()'s definition.
  expect_snapshot(cli::cli_bullets(bulletize(letters[1:6])))
  expect_snapshot(cli::cli_bullets(bulletize(letters[1:7])))
  expect_snapshot(cli::cli_bullets(bulletize(letters[1:8])))
  expect_snapshot(cli::cli_bullets(bulletize(letters[1:6], n_fudge = 0)))
  expect_snapshot(cli::cli_bullets(bulletize(letters[1:8], n_fudge = 3)))
})
|
setwd("C:\\Users\\user\\Desktop\\R code")
# Draw `times` values from a normal distribution truncated to the open
# interval (lo, up), via rejection sampling.
#
# Args:
#   up, lo:      upper / lower truncation bounds (both exclusive).
#   times:       number of accepted draws to return (may be 0).
#   demand_mean: mean of the underlying normal distribution.
#   demand_sd:   standard deviation of the underlying normal distribution.
#
# Returns: a numeric vector of length `times` with all values in (lo, up).
#
# Fixes vs. original: `<-` assignment; removes the unused `demandlist`;
# replaces the one-draw-at-a-time repeat loop with batched, vectorized
# rejection sampling; handles `times = 0` (the original `for (i in 1:times)`
# iterated over c(1, 0)).
demandCal <- function(up, lo, times, demand_mean, demand_sd) {
  stopifnot(lo < up, times >= 0)
  demand <- numeric(0)
  # Draw candidates in batches and keep those inside (lo, up) until we have
  # accepted at least `times` of them.
  while (length(demand) < times) {
    draws <- rnorm(max(times, 100L), mean = demand_mean, sd = demand_sd)
    demand <- c(demand, draws[draws > lo & draws < up])
  }
  demand[seq_len(times)]
}
# Simulate truncated-normal demand for five fare classes and persist each
# vector as a one-column CSV.
# FIX(review): write.csv() ignores `col.names` (with a warning), so that
# argument has been dropped; output files are unchanged (a header row is
# still written -- use write.table(..., col.names = FALSE) if a headerless
# file is truly required). `F` replaced with `FALSE`.
fare1 <- demandCal(up = 35, lo = 15, times = 1000, demand_mean = 22, demand_sd = 8.4)
write.csv(fare1, "C:\\Users\\user\\Desktop\\R code\\fare1.csv", row.names = FALSE)
fare2 <- demandCal(up = 50, lo = 28, times = 1000, demand_mean = 35.6, demand_sd = 16.3)
write.csv(fare2, "C:\\Users\\user\\Desktop\\R code\\fare2.csv", row.names = FALSE)
fare3 <- demandCal(up = 68, lo = 30, times = 1000, demand_mean = 39.8, demand_sd = 28.1)
write.csv(fare3, "C:\\Users\\user\\Desktop\\R code\\fare3.csv", row.names = FALSE)
fare4 <- demandCal(up = 80, lo = 35, times = 1000, demand_mean = 50.4, demand_sd = 10.4)
write.csv(fare4, "C:\\Users\\user\\Desktop\\R code\\fare4.csv", row.names = FALSE)
fare5 <- demandCal(up = 100, lo = 45, times = 1000, demand_mean = 55.5, demand_sd = 13.6)
write.csv(fare5, "C:\\Users\\user\\Desktop\\R code\\fare5.csv", row.names = FALSE)
|
/20170308demanddata.R
|
no_license
|
tina850506/R_code
|
R
| false
| false
| 1,251
|
r
|
setwd("C:\\Users\\user\\Desktop\\R code")
# Draw `times` values from a normal distribution truncated to the open
# interval (lo, up), via rejection sampling.
#
# Args:
#   up, lo:      upper / lower truncation bounds (both exclusive).
#   times:       number of accepted draws to return (may be 0).
#   demand_mean: mean of the underlying normal distribution.
#   demand_sd:   standard deviation of the underlying normal distribution.
#
# Returns: a numeric vector of length `times` with all values in (lo, up).
#
# Fixes vs. original: `<-` assignment; removes the unused `demandlist`;
# replaces the one-draw-at-a-time repeat loop with batched, vectorized
# rejection sampling; handles `times = 0` (the original `for (i in 1:times)`
# iterated over c(1, 0)).
demandCal <- function(up, lo, times, demand_mean, demand_sd) {
  stopifnot(lo < up, times >= 0)
  demand <- numeric(0)
  # Draw candidates in batches and keep those inside (lo, up) until we have
  # accepted at least `times` of them.
  while (length(demand) < times) {
    draws <- rnorm(max(times, 100L), mean = demand_mean, sd = demand_sd)
    demand <- c(demand, draws[draws > lo & draws < up])
  }
  demand[seq_len(times)]
}
# Simulate truncated-normal demand for five fare classes and persist each
# vector as a one-column CSV.
# FIX(review): write.csv() ignores `col.names` (with a warning), so that
# argument has been dropped; output files are unchanged (a header row is
# still written -- use write.table(..., col.names = FALSE) if a headerless
# file is truly required). `F` replaced with `FALSE`.
fare1 <- demandCal(up = 35, lo = 15, times = 1000, demand_mean = 22, demand_sd = 8.4)
write.csv(fare1, "C:\\Users\\user\\Desktop\\R code\\fare1.csv", row.names = FALSE)
fare2 <- demandCal(up = 50, lo = 28, times = 1000, demand_mean = 35.6, demand_sd = 16.3)
write.csv(fare2, "C:\\Users\\user\\Desktop\\R code\\fare2.csv", row.names = FALSE)
fare3 <- demandCal(up = 68, lo = 30, times = 1000, demand_mean = 39.8, demand_sd = 28.1)
write.csv(fare3, "C:\\Users\\user\\Desktop\\R code\\fare3.csv", row.names = FALSE)
fare4 <- demandCal(up = 80, lo = 35, times = 1000, demand_mean = 50.4, demand_sd = 10.4)
write.csv(fare4, "C:\\Users\\user\\Desktop\\R code\\fare4.csv", row.names = FALSE)
fare5 <- demandCal(up = 100, lo = 45, times = 1000, demand_mean = 55.5, demand_sd = 13.6)
write.csv(fare5, "C:\\Users\\user\\Desktop\\R code\\fare5.csv", row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ssm_operations.R
\name{ssm_get_parameter}
\alias{ssm_get_parameter}
\title{Get information about a parameter by using the parameter name}
\usage{
ssm_get_parameter(Name, WithDecryption)
}
\arguments{
\item{Name}{[required] The name of the parameter you want to query.}
\item{WithDecryption}{Return decrypted values for secure string parameters. This flag is
ignored for String and StringList parameter types.}
}
\description{
Get information about a parameter by using the parameter name. Don\'t
confuse this API action with the GetParameters API action.
}
\section{Request syntax}{
\preformatted{svc$get_parameter(
Name = "string",
WithDecryption = TRUE|FALSE
)
}
}
\keyword{internal}
|
/paws/man/ssm_get_parameter.Rd
|
permissive
|
johnnytommy/paws
|
R
| false
| true
| 770
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ssm_operations.R
\name{ssm_get_parameter}
\alias{ssm_get_parameter}
\title{Get information about a parameter by using the parameter name}
\usage{
ssm_get_parameter(Name, WithDecryption)
}
\arguments{
\item{Name}{[required] The name of the parameter you want to query.}
\item{WithDecryption}{Return decrypted values for secure string parameters. This flag is
ignored for String and StringList parameter types.}
}
\description{
Get information about a parameter by using the parameter name. Don\'t
confuse this API action with the GetParameters API action.
}
\section{Request syntax}{
\preformatted{svc$get_parameter(
Name = "string",
WithDecryption = TRUE|FALSE
)
}
}
\keyword{internal}
|
# fileloc = "C:/Users/Ian/Documents/Work/JP/Schizophrenia/Processed_data"
# filename = "FeatureMatrixCLEAN2REPSBACK.txt"
# srvyinds=3:40
# labels = c("Anxiety","Depression","Meds","Sleep","Psychosis","WSS")
# groupings = list()
# groupings[[1]] = c(3,6,8,13,23,25)
# groupings[[2]] = c(5,7,10,11,14,21,24,26)
# groupings[[3]] = c(9)
# groupings[[4]] = c(5,11,12,22)
# groupings[[5]] = c(8,15,16,27)
# groupings[[6]] = c(3,4,5,7,10,11,12,14,21,22,23)
#CombineSurveyResponses(fileloc,filename,srvyinds,groupings,labels)
# Collapse per-question survey columns of a feature matrix into per-construct
# mean scores (e.g. Anxiety, Depression, ...).
#
# Args:
#   srvyinds:  column indices of the raw survey questions in the matrix.
#   groupings: list of integer vectors; each gives the columns belonging to
#              one construct.
#   labels:    character vector naming the output column for each grouping.
#
# Reads globals `output_filepath` and `daysback` -- assumed to be set by the
# calling pipeline (TODO: pass them as arguments). Returns NULL when the
# input .rds is missing; otherwise writes a combined .rds and a tab-delimited
# .txt copy as side effects.
#
# Fixes vs. original: seq_along()/seq_len() instead of 1:length()/1:nrow();
# TRUE instead of T; mean(..., na.rm = TRUE) instead of hand-rolled
# sum()/count; `<-` assignment throughout.
combine_survey_responses <- function(srvyinds, groupings, labels) {
  featurefile <- paste(output_filepath, "/Processed_data", "/Group", sep = "")
  filename <- paste("feature_matrix_clean_", daysback, "daycarry.rds", sep = "")
  outfilename <- paste("feature_matrix_clean_", daysback, "daycarry_combined.rds", sep = "")
  outtxtname <- paste("feature_matrix_clean_", daysback, "daycarry_combined.txt", sep = "")
  infilename <- paste(featurefile, filename, sep = "/")
  if (!file.exists(infilename)) {
    return(NULL)
  }
  dat <- readRDS(infilename)[[1]]
  # Keep the two leading ID columns, insert one NA column per construct,
  # then append everything after the raw survey columns.
  outmat <- cbind(
    dat[, 1:2],
    matrix(NA, ncol = length(groupings), nrow = nrow(dat),
           dimnames = list(NULL, labels)),
    dat[, (max(srvyinds) + 1):ncol(dat)]
  )
  for (j in seq_along(groupings)) {
    grp_cols <- groupings[[j]]
    for (i in seq_len(nrow(dat))) {
      vals <- as.numeric(dat[i, grp_cols])
      # Mean of the answered questions; leave NA when none were answered.
      if (any(!is.na(vals))) {
        outmat[i, 2 + j] <- mean(vals, na.rm = TRUE)
      }
    }
  }
  saveRDS(list(outmat), paste(featurefile, outfilename, sep = "/"))
  write.table(outmat, file = paste(featurefile, outtxtname, sep = "/"),
              sep = "\t", quote = FALSE, row.names = FALSE)
}
|
/Processing/CombineSurveyResponses.R
|
no_license
|
ianjamesbarnett/AWARE_Processing_AutoReports
|
R
| false
| false
| 1,667
|
r
|
# fileloc = "C:/Users/Ian/Documents/Work/JP/Schizophrenia/Processed_data"
# filename = "FeatureMatrixCLEAN2REPSBACK.txt"
# srvyinds=3:40
# labels = c("Anxiety","Depression","Meds","Sleep","Psychosis","WSS")
# groupings = list()
# groupings[[1]] = c(3,6,8,13,23,25)
# groupings[[2]] = c(5,7,10,11,14,21,24,26)
# groupings[[3]] = c(9)
# groupings[[4]] = c(5,11,12,22)
# groupings[[5]] = c(8,15,16,27)
# groupings[[6]] = c(3,4,5,7,10,11,12,14,21,22,23)
#CombineSurveyResponses(fileloc,filename,srvyinds,groupings,labels)
# Collapse per-question survey columns of a feature matrix into per-construct
# mean scores (e.g. Anxiety, Depression, ...).
#
# Args:
#   srvyinds:  column indices of the raw survey questions in the matrix.
#   groupings: list of integer vectors; each gives the columns belonging to
#              one construct.
#   labels:    character vector naming the output column for each grouping.
#
# Reads globals `output_filepath` and `daysback` -- assumed to be set by the
# calling pipeline (TODO: pass them as arguments). Returns NULL when the
# input .rds is missing; otherwise writes a combined .rds and a tab-delimited
# .txt copy as side effects.
#
# Fixes vs. original: seq_along()/seq_len() instead of 1:length()/1:nrow();
# TRUE instead of T; mean(..., na.rm = TRUE) instead of hand-rolled
# sum()/count; `<-` assignment throughout.
combine_survey_responses <- function(srvyinds, groupings, labels) {
  featurefile <- paste(output_filepath, "/Processed_data", "/Group", sep = "")
  filename <- paste("feature_matrix_clean_", daysback, "daycarry.rds", sep = "")
  outfilename <- paste("feature_matrix_clean_", daysback, "daycarry_combined.rds", sep = "")
  outtxtname <- paste("feature_matrix_clean_", daysback, "daycarry_combined.txt", sep = "")
  infilename <- paste(featurefile, filename, sep = "/")
  if (!file.exists(infilename)) {
    return(NULL)
  }
  dat <- readRDS(infilename)[[1]]
  # Keep the two leading ID columns, insert one NA column per construct,
  # then append everything after the raw survey columns.
  outmat <- cbind(
    dat[, 1:2],
    matrix(NA, ncol = length(groupings), nrow = nrow(dat),
           dimnames = list(NULL, labels)),
    dat[, (max(srvyinds) + 1):ncol(dat)]
  )
  for (j in seq_along(groupings)) {
    grp_cols <- groupings[[j]]
    for (i in seq_len(nrow(dat))) {
      vals <- as.numeric(dat[i, grp_cols])
      # Mean of the answered questions; leave NA when none were answered.
      if (any(!is.na(vals))) {
        outmat[i, 2 + j] <- mean(vals, na.rm = TRUE)
      }
    }
  }
  saveRDS(list(outmat), paste(featurefile, outfilename, sep = "/"))
  write.table(outmat, file = paste(featurefile, outtxtname, sep = "/"),
              sep = "\t", quote = FALSE, row.names = FALSE)
}
|
\name{threads}
\alias{threads}
\title{E-Mail Threads}
\description{
Extract threads (i.e., chains of messages on a single subject) from
e-mail documents.
}
\usage{
threads(x)
}
\arguments{
\item{x}{A corpus consisting of e-mails (\code{MailDocument}s).}
}
\value{
A list with the two named components \code{ThreadID} and
\code{ThreadDepth}, listing a thread and the level of replies for each
mail in the corpus \code{x}.
}
\details{
This function uses a one-pass algorithm for extracting the thread
information by inspecting the \dQuote{References} header. Some mails
(e.g., reply mails appearing before their corresponding base mails)
might not be tagged correctly.
}
\examples{
require("tm")
newsgroup <- system.file("mails", package = "tm.plugin.mail")
news <- VCorpus(DirSource(newsgroup),
readerControl = list(reader = readMail))
vapply(news, meta, "id", FUN.VALUE = "")
lapply(news, function(x) meta(x, "header")$References)
(info <- threads(news))
lengths(split(news, info$ThreadID))
}
|
/man/threads.Rd
|
no_license
|
cran/tm.plugin.mail
|
R
| false
| false
| 1,031
|
rd
|
\name{threads}
\alias{threads}
\title{E-Mail Threads}
\description{
Extract threads (i.e., chains of messages on a single subject) from
e-mail documents.
}
\usage{
threads(x)
}
\arguments{
\item{x}{A corpus consisting of e-mails (\code{MailDocument}s).}
}
\value{
A list with the two named components \code{ThreadID} and
\code{ThreadDepth}, listing a thread and the level of replies for each
mail in the corpus \code{x}.
}
\details{
This function uses a one-pass algorithm for extracting the thread
information by inspecting the \dQuote{References} header. Some mails
(e.g., reply mails appearing before their corresponding base mails)
might not be tagged correctly.
}
\examples{
require("tm")
newsgroup <- system.file("mails", package = "tm.plugin.mail")
news <- VCorpus(DirSource(newsgroup),
readerControl = list(reader = readMail))
vapply(news, meta, "id", FUN.VALUE = "")
lapply(news, function(x) meta(x, "header")$References)
(info <- threads(news))
lengths(split(news, info$ThreadID))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MAUDE.R
\name{getZScalesWithNTGuides}
\alias{getZScalesWithNTGuides}
\title{Calculate Z-score scaling factors using non-targeting guides}
\usage{
getZScalesWithNTGuides(ntData, uGuidesPerElement, mergeBy, ntSampleFold = 10)
}
\arguments{
\item{ntData}{data.frame containing the data for the non-targeting guides}
\item{uGuidesPerElement}{a unique vector of guide counts per element}
\item{mergeBy}{usually contains a data.frame containing the headers that demarcate the screen ID}
\item{ntSampleFold}{how many times to sample each non-targeting guide to make the Z score scale (defaults to 10)}
}
\value{
a data.frame containing a Z-score scaling factor, one for every number of guides and unique entry in mergeBy
}
\description{
Calculates scaling factors to calibrate element-wise Z-scores by repeatedly calculating a set of "null" Z-scores by repeatedly sampling the given numbers of non-targeting guides per element.
}
\examples{
#not generally used directly
}
|
/man/getZScalesWithNTGuides.Rd
|
permissive
|
pimentel/MAUDE
|
R
| false
| true
| 1,047
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MAUDE.R
\name{getZScalesWithNTGuides}
\alias{getZScalesWithNTGuides}
\title{Calculate Z-score scaling factors using non-targeting guides}
\usage{
getZScalesWithNTGuides(ntData, uGuidesPerElement, mergeBy, ntSampleFold = 10)
}
\arguments{
\item{ntData}{data.frame containing the data for the non-targeting guides}
\item{uGuidesPerElement}{a unique vector of guide counts per element}
\item{mergeBy}{usually contains a data.frame containing the headers that demarcate the screen ID}
\item{ntSampleFold}{how many times to sample each non-targeting guide to make the Z score scale (defaults to 10)}
}
\value{
a data.frame containing a Z-score scaling factor, one for every number of guides and unique entry in mergeBy
}
\description{
Calculates scaling factors to calibrate element-wise Z-scores by repeatedly calculating a set of "null" Z-scores by repeatedly sampling the given numbers of non-targeting guides per element.
}
\examples{
#not generally used directly
}
|
=begin
=メニューのカスタマイズ
((<menus.xml|URL:MenusXml.html>))を編集することでメニューをカスタマイズできます。
Windows Mobile 5.0以降では、トップレベルのポップアップメニューの数を2個以下にするとソフトキーが使用できます。
=end
|
/docs/CustomizeMenus.rd
|
permissive
|
snakamura/q3
|
R
| false
| false
| 319
|
rd
|
=begin
=メニューのカスタマイズ
((<menus.xml|URL:MenusXml.html>))を編集することでメニューをカスタマイズできます。
Windows Mobile 5.0以降では、トップレベルのポップアップメニューの数を2個以下にするとソフトキーが使用できます。
=end
|
\name{runCommand}
\alias{runCommand}
\title{Issue the System Call That Invokes NONMEM}
\description{
If \code{udef} is \code{FALSE}, \code{command} is passed to \code{nm.pl} with supporting arguments. If \code{grid}
is \code{TRUE}, the result is passed to \code{qsub} with supporting arguments. If \code{boot}
is \code{TRUE}, \sQuote{&} is appended to the result. Each element of the result is executed
in succession using a system call customized by platform.
}
\usage{
runCommand(
command,
...,
run,
rdir,
boot,
urgent,
checksum,
grid,
udef = FALSE,
ctlfile,
outfile,
perl = if (nix()) 'perl -S' else if (!invisible) 'cmd /K perl -S' else 'cmd /C perl -S',
intern = invisible,
minimized = invisible,
invisible = FALSE,
split = grid,
N = glue('Run', run, if (split) c('c', 'e') else NULL),
o = rdir,
e = rdir,
L=if(split & interface=='nm.pl')c(compileflag(compiler(config(dirname(command)))),NA)else NA,
hold_jid = if (split) c(NA, glue('Run', run, 'c')) else NA,
V = '',
j = 'y',
q=if(split)
c(
'compile.q',
if(urgent)'all.q' else 'bootstrap.q'
)
else
if(!execute)
'compile.q'
else if(urgent)'all.q' else 'bootstrap.q',
sync = if (boot) 'n' else 'y',
shell = 'n',
b = 'y',
cwd = '',
compile = TRUE,
execute = TRUE,
background=FALSE,
interface = 'nm.pl'
)
}
\arguments{
\item{command}{a command to pass to \code{system}}
\item{run}{run name}
\item{rdir}{run directory}
\item{boot}{see \code{NONR}}
\item{urgent}{see \code{NONR}}
\item{checksum}{see \code{NONR}}
\item{grid}{see \code{NONR}}
\item{udef}{see \code{NONR}}
\item{ctlfile}{see \code{runNonmem}}
\item{outfile}{see \code{runNonmem}}
\item{perl}{a character string to invoke perl}
\item{intern}{see \code{NONR}, passed to \code{system}}
\item{minimized}{see \code{NONR}, passed to \code{system}}
\item{invisible}{see \code{runNonmem}}
\item{split}{whether compile and execute should be run separately}
\item{N}{passed to \code{qsub}}
\item{o}{passed to \code{qsub}}
\item{e}{passed to \code{qsub}}
\item{L}{passed to \code{qsub} as an instance of \sQuote{l}}
\item{hold\_jid}{passed to \code{qsub}}
\item{V}{passed to \code{qsub}}
\item{j}{passed to \code{qsub}}
\item{q}{passed to \code{qsub}}
\item{sync}{passed to \code{qsub}. Bootstrap runs usually occur in large quantities,
whereas SGE has an internal limitation on number of synchronized processes. Therefore,
no attempt is currently made to synchronize bootstrap runs.}
\item{shell}{passed to \code{qsub}}
\item{b}{passed to \code{qsub}}
\item{cwd}{passed to \code{qsub}}
\item{compile}{passed to \code{nm.pl}}
\item{execute}{passed to \code{nm.pl}, influences default for \code{q}}
\item{background}{\code{TRUE} appends \sQuote{l} to command lines to put the
process in the background. Defunct?}
\item{interface}{the name of a function to prepare NONMEM command lines}
\item{\dots}{passed to \code{nm.pl} and \code{qsub}}
}
\details{
The argument \sQuote{L} represents a possibly-constitutive instance of \code{qsub}'s \sQuote{l},
but is not called \sQuote{l} so that other instances of \sQuote{l} (multiple are allowed)
will not accidentally override it. Users can override intentionally, of course.
\code{N}, \code{L}, and \code{hold_jid} are coordinated so that if a run is split, compile status is flagged
on the compile run, and the execute run waits for compile to finish.
\sQuote{q} is handled specially. When overriding, be sure to pass a character vector of
length one for a normal run, and of length two if \code{split} is \code{TRUE} (the default when
\code{grid} is \code{TRUE}). By default, all standalone compile-only runs are diverted to
\sQuote{compile.q}, as well as all compile halves of split runs. By default, all execute-only
runs as well as the execute halves of split runs are diverted to \sQuote{all.q} if urgent,
and \sQuote{bootstrap.q} otherwise.
}
\value{Used for side effects}
\references{\url{http://mifuns.googlecode.com}}
\author{Tim Bergsma}
\seealso{
\itemize{
\item \code{\link{runNonmem}}
}
}
\keyword{manip}
|
/man/runCommand.Rd
|
no_license
|
cran/MIfuns
|
R
| false
| false
| 4,149
|
rd
|
\name{runCommand}
\alias{runCommand}
\title{Issue the System Call That Invokes NONMEM}
\description{
If \code{udef} is \code{FALSE}, \code{command} is passed to \code{nm.pl} with supporting arguments. If \code{grid}
is \code{TRUE}, the result is passed to \code{qsub} with supporting arguments. If \code{boot}
is \code{TRUE}, \sQuote{&} is appended to the result. Each element of the result is executed
in succession using a system call customized by platform.
}
\usage{
runCommand(
command,
...,
run,
rdir,
boot,
urgent,
checksum,
grid,
udef = FALSE,
ctlfile,
outfile,
perl = if (nix()) 'perl -S' else if (!invisible) 'cmd /K perl -S' else 'cmd /C perl -S',
intern = invisible,
minimized = invisible,
invisible = FALSE,
split = grid,
N = glue('Run', run, if (split) c('c', 'e') else NULL),
o = rdir,
e = rdir,
L=if(split & interface=='nm.pl')c(compileflag(compiler(config(dirname(command)))),NA)else NA,
hold_jid = if (split) c(NA, glue('Run', run, 'c')) else NA,
V = '',
j = 'y',
q=if(split)
c(
'compile.q',
if(urgent)'all.q' else 'bootstrap.q'
)
else
if(!execute)
'compile.q'
else if(urgent)'all.q' else 'bootstrap.q',
sync = if (boot) 'n' else 'y',
shell = 'n',
b = 'y',
cwd = '',
compile = TRUE,
execute = TRUE,
background=FALSE,
interface = 'nm.pl'
)
}
\arguments{
\item{command}{a command to pass to \code{system}}
\item{run}{run name}
\item{rdir}{run directory}
\item{boot}{see \code{NONR}}
\item{urgent}{see \code{NONR}}
\item{checksum}{see \code{NONR}}
\item{grid}{see \code{NONR}}
\item{udef}{see \code{NONR}}
\item{ctlfile}{see \code{runNonmem}}
\item{outfile}{see \code{runNonmem}}
\item{perl}{a character string to invoke perl}
\item{intern}{see \code{NONR}, passed to \code{system}}
\item{minimized}{see \code{NONR}, passed to \code{system}}
\item{invisible}{see \code{runNonmem}}
\item{split}{whether compile and execute should be run separately}
\item{N}{passed to \code{qsub}}
\item{o}{passed to \code{qsub}}
\item{e}{passed to \code{qsub}}
\item{L}{passed to \code{qsub} as an instance of \sQuote{l}}
\item{hold\_jid}{passed to \code{qsub}}
\item{V}{passed to \code{qsub}}
\item{j}{passed to \code{qsub}}
\item{q}{passed to \code{qsub}}
\item{sync}{passed to \code{qsub}. Bootstrap runs usually occur in large quantities,
whereas SGE has an internal limitation on number of synchronized processes. Therefore,
no attempt is currently made to synchronize bootstrap runs.}
\item{shell}{passed to \code{qsub}}
\item{b}{passed to \code{qsub}}
\item{cwd}{passed to \code{qsub}}
\item{compile}{passed to \code{nm.pl}}
\item{execute}{passed to \code{nm.pl}, influences default for \code{q}}
\item{background}{\code{TRUE} appends \sQuote{l} to command lines to put the
process in the background. Defunct?}
\item{interface}{the name of a function to prepare NONMEM command lines}
\item{\dots}{passed to \code{nm.pl} and \code{qsub}}
}
\details{
The argument \sQuote{L} represents a possibly-constitutive instance of \code{qsub}'s \sQuote{l},
but is not called \sQuote{l} so that other instances of \sQuote{l} (multiple are allowed)
will not accidentally override it. Users can override intentionally, of course.
\code{N}, \code{L}, and \code{hold_jid} are coordinated so that if a run is split, compile status is flagged
on the compile run, and the execute run waits for compile to finish.
\sQuote{q} is handled specially. When overriding, be sure to pass a character vector of
length one for a normal run, and of length two if \code{split} is \code{TRUE} (the default when
\code{grid} is \code{TRUE}). By default, all standalone compile-only runs are diverted to
\sQuote{compile.q}, as well as all compile halves of split runs. By default, all execute-only
runs as well as the execute halves of split runs are diverted to \sQuote{all.q} if urgent,
and \sQuote{bootstrap.q} otherwise.
}
\value{Used for side effects}
\references{\url{http://mifuns.googlecode.com}}
\author{Tim Bergsma}
\seealso{
\itemize{
\item \code{\link{runNonmem}}
}
}
\keyword{manip}
|
# Exploratory script: estimate the maximum depth of seagrass colonization
# ("doc") at grid points within one Tampa Bay segment, then map the
# estimates. Project helpers grid_est(), buff_ext() and doc_est() are
# expected to be loaded by .Rprofile; readShapeSpatial() comes from
# maptools -- presumably also attached there (confirm).
source('.Rprofile')
##
# get data for doc
# segment polygon for old tampa bay
seg_shp <- readShapeSpatial('seagrass_gis/seg_820.shp')
# seagrass bathmetry intersected points for old tampa bay
sgpts_shp <- readShapeSpatial('seagrass_gis/sgpts_820_2006_buff.shp')
# set ggplot theme
theme_set(theme_bw())
# for debugging
grid_spc <- 0.02    # grid spacing passed to grid_est()
grid_seed <- 1234   # RNG seed so the sampled point grid is reproducible
test_pt <- 2        # index of the single point used for the debug run below
radius <- 0.04      # buffer radius around each evaluation point
thresh <- 0.1       # threshold passed to doc_est()
show_all <- F
# random points
set.seed(grid_seed)
pts <- grid_est(seg_shp, spacing = grid_spc)
# point from random points for buffer
test_pt <- pts[test_pt, ]
# get bathym points around test_pt
# NOTE(review): this single-point run is leftover debug scaffolding --
# `buff_pts` is overwritten on every iteration of the loop below.
buff_pts <- buff_ext(sgpts_shp, test_pt, buff = radius)
##
# example
# Estimate depth of colonization at every grid point.
maxd <- list()
# NOTE(review): if grid_est() returns a data.frame, length(pts) is the
# column count, not the number of points -- seq_len(nrow(pts)) would be
# safer. Verify grid_est()'s return type before relying on this bound.
for(i in 1:length(pts)){
  eval_pt <- pts[i, ]
  buff_pts <- buff_ext(sgpts_shp, eval_pt, buff = radius)
  est_pts <- data.frame(buff_pts)
  # GRID_CODE appears to hold positive depths; sign is flipped so Depth is
  # negative-down -- TODO confirm against the shapefile's attribute table.
  est_pts$Depth <- -1 * est_pts$GRID_CODE
  ests <- doc_est(est_pts, thresh = thresh,
    depth_var = 'Depth', sg_var = 'SEAGRASS'
  )
  maxd[[i]] <- ests$ests
}
# combine in data frame for plotting
maxd <- data.frame(pts, zmax_all = do.call('c', maxd))
# get values for combined legend
rngs <- range(maxd$zmax_all, na.rm = T)
brks <- seq(rngs[1], rngs[2], length = 5)
labs <- format(round(brks, 1), nsmall = 1, digits =1)
# unestimable points to plot
unest <- maxd[is.na(maxd[, 'zmax_all']), ]
##
# plot: bubble map of the depth estimates over the segment outline.
# Size and colour are mapped to the same variable and share breaks/labels so
# ggplot merges them into a single legend (guide_legend on colour).
p1 <- ggplot(seg_shp, aes(long, lat)) +
  geom_polygon(fill = 'white') +
  geom_path(color = 'black') +
  theme_classic() +
  coord_equal() +
  ylab('Latitude') +
  xlab('Longitude') +
  geom_point(
    data = maxd,
    aes(Var1, Var2, size = zmax_all, colour = zmax_all)
  ) +
  # geom_point(data = unest,
  #   aes(Var1, Var2),
  #   size = 3, colour = 'grey',
  #   pch = 1
  # ) +
  ggtitle('Depth of col (m)') +
  theme(legend.position = c(0,0), legend.justification = c(0, 0)) +
  scale_size_continuous(name = "Depth estimate",
    breaks = brks,
    labels = labs,
    range = c(1, 12)) +
  scale_colour_gradient(name = "Depth estimate",
    breaks = brks,
    labels = labs) +
  guides(colour = guide_legend())
|
/extra.R
|
no_license
|
fawda123/bigbend_seagrass
|
R
| false
| false
| 2,089
|
r
|
# Exploratory script: estimate the maximum depth of seagrass colonization
# ("doc") at grid points within one Tampa Bay segment, then map the
# estimates. Project helpers grid_est(), buff_ext() and doc_est() are
# expected to be loaded by .Rprofile; readShapeSpatial() comes from
# maptools -- presumably also attached there (confirm).
source('.Rprofile')
##
# get data for doc
# segment polygon for old tampa bay
seg_shp <- readShapeSpatial('seagrass_gis/seg_820.shp')
# seagrass bathmetry intersected points for old tampa bay
sgpts_shp <- readShapeSpatial('seagrass_gis/sgpts_820_2006_buff.shp')
# set ggplot theme
theme_set(theme_bw())
# for debugging
grid_spc <- 0.02    # grid spacing passed to grid_est()
grid_seed <- 1234   # RNG seed so the sampled point grid is reproducible
test_pt <- 2        # index of the single point used for the debug run below
radius <- 0.04      # buffer radius around each evaluation point
thresh <- 0.1       # threshold passed to doc_est()
show_all <- F
# random points
set.seed(grid_seed)
pts <- grid_est(seg_shp, spacing = grid_spc)
# point from random points for buffer
test_pt <- pts[test_pt, ]
# get bathym points around test_pt
# NOTE(review): this single-point run is leftover debug scaffolding --
# `buff_pts` is overwritten on every iteration of the loop below.
buff_pts <- buff_ext(sgpts_shp, test_pt, buff = radius)
##
# example
# Estimate depth of colonization at every grid point.
maxd <- list()
# NOTE(review): if grid_est() returns a data.frame, length(pts) is the
# column count, not the number of points -- seq_len(nrow(pts)) would be
# safer. Verify grid_est()'s return type before relying on this bound.
for(i in 1:length(pts)){
  eval_pt <- pts[i, ]
  buff_pts <- buff_ext(sgpts_shp, eval_pt, buff = radius)
  est_pts <- data.frame(buff_pts)
  # GRID_CODE appears to hold positive depths; sign is flipped so Depth is
  # negative-down -- TODO confirm against the shapefile's attribute table.
  est_pts$Depth <- -1 * est_pts$GRID_CODE
  ests <- doc_est(est_pts, thresh = thresh,
    depth_var = 'Depth', sg_var = 'SEAGRASS'
  )
  maxd[[i]] <- ests$ests
}
# combine in data frame for plotting
maxd <- data.frame(pts, zmax_all = do.call('c', maxd))
# get values for combined legend
rngs <- range(maxd$zmax_all, na.rm = T)
brks <- seq(rngs[1], rngs[2], length = 5)
labs <- format(round(brks, 1), nsmall = 1, digits =1)
# unestimable points to plot
unest <- maxd[is.na(maxd[, 'zmax_all']), ]
##
# plot: bubble map of the depth estimates over the segment outline.
# Size and colour are mapped to the same variable and share breaks/labels so
# ggplot merges them into a single legend (guide_legend on colour).
p1 <- ggplot(seg_shp, aes(long, lat)) +
  geom_polygon(fill = 'white') +
  geom_path(color = 'black') +
  theme_classic() +
  coord_equal() +
  ylab('Latitude') +
  xlab('Longitude') +
  geom_point(
    data = maxd,
    aes(Var1, Var2, size = zmax_all, colour = zmax_all)
  ) +
  # geom_point(data = unest,
  #   aes(Var1, Var2),
  #   size = 3, colour = 'grey',
  #   pch = 1
  # ) +
  ggtitle('Depth of col (m)') +
  theme(legend.position = c(0,0), legend.justification = c(0, 0)) +
  scale_size_continuous(name = "Depth estimate",
    breaks = brks,
    labels = labs,
    range = c(1, 12)) +
  scale_colour_gradient(name = "Depth estimate",
    breaks = brks,
    labels = labs) +
  guides(colour = guide_legend())
|
# GO_MWU uses a continuous measure of significance (such as fold-change or
# -log(p-value)) to identify GO categories that are significantly enriched
# with either up- or down-regulated genes. The advantage: no need to impose
# an arbitrary significance cutoff.
# If the measure is binary (0 or 1) the script will perform a typical "GO
# enrichment" analysis based on Fisher's exact test: it will show GO
# categories over-represented among the genes that have 1 as their measure.
# On the plot, different fonts are used to indicate significance and color
# indicates enrichment with either up- (red) or down- (blue) regulated
# genes. No colors are shown for binary measure analysis.
# The tree on the plot is a hierarchical clustering of GO categories based
# on shared genes. Categories with no branch length between them are subsets
# of each other.
# The fraction next to a GO category name indicates the fraction of "good"
# genes in it; "good" genes being the ones exceeding the arbitrary absValue
# cutoff (option in gomwuPlot). For the Fisher's-based test, specify
# absValue=0.5. This value does not affect statistics and is used for
# plotting only.
# Stretch the plot manually to match the tree to the text.
# Mikhail V. Matz, UT Austin, February 2015; matz@utexas.edu
################################################################
# First, press command-D on Mac or ctrl-shift-H in RStudio and navigate to
# the directory containing the scripts and input files. Then edit, mark and
# execute the following bits of code, one after another.
### If the CSV from Excel is messed up, rewrite it without quotes or row
### names (the header row is kept either way) ###
mydata <- read.csv("magenta_kme_GOMWU.csv")
# FIX(review): write.csv() ignores `append`, `sep`, `col.names` and
# `qmethod` (with a warning), and `eol`, `na`, `dec`, `fileEncoding` were
# all at their defaults -- so this simpler call writes a byte-identical file
# without the warnings.
write.csv(mydata, file = "new_magenta_kme_GOMWU.csv", quote = FALSE,
          row.names = FALSE)
# Edit these to match your data file names:
input="new_magenta_kme_GOMWU.csv"
# two columns of comma-separated values: gene id, continuous measure of
# significance. To perform standard GO enrichment analysis based on Fisher's
# exact test, use a binary measure (0 or 1, i.e., either significant or not).
goAnnotations="annos.tab"
# two-column, tab-delimited, one line per gene, multiple GO terms separated
# by semicolon. If you have multiple lines per gene, use nrify_GOtable.pl
# prior to running this script.
goDatabase="go.obo"
# download from http://www.geneontology.org/GO.downloads.ontology.shtml
goDivision="BP"
# either MF, or BP, or CC
source("gomwu.functions.R")
# Calculating stats. It takes ~3 min for MF and BP. Do not rerun it if you
# just want to replot the data with different cutoffs; go straight to
# gomwuPlot. If you change any of the numeric values below, delete the files
# that were generated in previous runs first.
gomwuStats(input, goDatabase, goAnnotations, goDivision,
perlPath="perl", # replace with full path to perl executable if it is not in your system's PATH already
largest=0.1, # a GO category will not be considered if it contains more than this fraction of the total number of genes
smallest=5, # a GO category should contain at least this many genes to be considered
clusterCutHeight=0.25, # threshold for merging similar (gene-sharing) terms.
# Alternative="g" # by default the MWU test is two-tailed; specify "g" or "l" if you want to test for "greater" or "less" instead.
Module=TRUE,Alternative="g" # un-comment this if you are analyzing a SIGNED WGCNA module (values: 0 for not-in-module genes, kME for in-module genes). In the call to gomwuPlot below, specify absValue=0.001 (count number of "good genes" that fall into the module)
# Module=TRUE # un-comment this if you are analyzing an UNSIGNED WGCNA module
)
# do not continue if the printout shows that no GO terms pass 10% FDR.
# you should only have ONE "dissim" file in your directory before plotting;
# you will get an error message otherwise. Delete any old "dissim" files
# from other comparisons from this directory.
# Plotting results
# NOTE(review): quartz() opens a macOS-only graphics device; use x11() or
# windows() on other platforms.
quartz()
gomwuPlot(input,goAnnotations,goDivision,
# absValue=-log(0.05,10), # genes with the measure value exceeding this will be counted as "good genes". Specify absValue=0.001 if you are doing Fisher's exact test for standard GO enrichment or analyzing a WGCNA module (all non-zero genes = "good genes").
absValue=0.001,
level1=0.05, # FDR threshold for plotting. Specify level1=1 to plot all GO categories containing genes exceeding the absValue.
level2=0.01, # FDR cutoff to print in regular (not italic) font.
level3=0.005, # FDR cutoff to print in large bold font.
txtsize=1, # decrease to fit more on one page, or increase (after rescaling the plot so the tree fits the text) for better "word cloud" effect
treeHeight=0.5, # height of the hierarchical clustering tree
colors=c("black") # these are the default colors; un-comment and change if needed
)
# manually rescale the plot so the tree matches the text
# if there are too many categories displayed, try making the cutoffs more
# stringent, e.g. level1=0.01, level2=0.005, level3=0.001.
|
/GO_MWU_Diversity_WGCNA_magenta.R
|
no_license
|
lfuess/MicrobiomeMS
|
R
| false
| false
| 5,143
|
r
|
# GO_MWU uses a continuous measure of significance (such as fold-change or
# -log(p-value)) to identify GO categories that are significantly enriched
# with either up- or down-regulated genes. The advantage: no need to impose an
# arbitrary significance cutoff.
# If the measure is binary (0 or 1) the script will perform a typical "GO
# enrichment" analysis based on Fisher's exact test: it will show GO
# categories over-represented among the genes that have 1 as their measure.
# On the plot, different fonts are used to indicate significance and color
# indicates enrichment with either up- (red) or down- (blue) regulated genes.
# No colors are shown for binary measure analysis.
# The tree on the plot is hierarchical clustering of GO categories based on
# shared genes. Categories with no branch length between them are subsets of
# each other.
# The fraction next to a GO category name indicates the fraction of "good"
# genes in it; "good" genes being the ones exceeding the arbitrary absValue
# cutoff (option in gomwuPlot). For a Fisher's-based test, specify
# absValue=0.5. This value does not affect statistics and is used for
# plotting only.
# Stretch the plot manually to match tree to text.
# Mikhail V. Matz, UT Austin, February 2015; matz@utexas.edu
################################################################
# First, press command-D on Mac or ctrl-shift-H in RStudio and navigate to the
# directory containing scripts and input files. Then edit, mark and execute
# the following bits of code, one after another.
### If the CSV from Excel is messed up, rewrite it with no quotes or row names ###
mydata <- read.csv("magenta_kme_GOMWU.csv")
# write.csv() ignores (with a warning) any attempt to set append, sep, dec,
# col.names or qmethod, so only the arguments it honours are passed here;
# the file written is identical to the one produced by the original call.
write.csv(mydata, file = "new_magenta_kme_GOMWU.csv", quote = FALSE, row.names = FALSE)
# Edit these to match your data file names:
input="new_magenta_kme_GOMWU.csv"
# Two columns of comma-separated values: gene id, continuous measure of
# significance. To perform standard GO enrichment analysis based on Fisher's
# exact test, use a binary measure (0 or 1, i.e., either significant or not).
goAnnotations="annos.tab"
# Two-column, tab-delimited, one line per gene, multiple GO terms separated by
# semicolons. If you have multiple lines per gene, use nrify_GOtable.pl prior
# to running this script.
goDatabase="go.obo"
# Download from http://www.geneontology.org/GO.downloads.ontology.shtml
goDivision="BP"
# Either MF, or BP, or CC
source("gomwu.functions.R")
# Calculating stats. It takes ~3 min for MF and BP. Do not rerun it if you
# just want to replot the data with different cutoffs; go straight to
# gomwuPlot. If you change any of the numeric values below, delete the files
# that were generated in previous runs first.
gomwuStats(input, goDatabase, goAnnotations, goDivision,
perlPath="perl", # replace with full path to perl executable if it is not in your system's PATH already
largest=0.1, # a GO category will not be considered if it contains more than this fraction of the total number of genes
smallest=5, # a GO category should contain at least this many genes to be considered
clusterCutHeight=0.25, # threshold for merging similar (gene-sharing) terms.
# Alternative="g" # by default the MWU test is two-tailed; specify "g" or "l" if you want to test for "greater" or "less" instead.
Module=TRUE,Alternative="g" # un-remark this if you are analyzing a SIGNED WGCNA module (values: 0 for not-in-module genes, kME for in-module genes). In the call to gomwuPlot below, specify absValue=0.001 (count number of "good genes" that fall into the module)
# Module=TRUE # un-remark this if you are analyzing an UNSIGNED WGCNA module
)
# Do not continue if the printout shows that no GO terms pass 10% FDR.
# You should only have ONE "dissim" file in your directory before plotting;
# you will get an error message otherwise. Delete any old "dissim" files from
# other comparisons from this directory.
# Plotting results
quartz()
gomwuPlot(input,goAnnotations,goDivision,
#absValue=-log(0.05,10), # genes with the measure value exceeding this will be counted as "good genes". Specify absValue=0.001 if you are doing Fisher's exact test for standard GO enrichment or analyzing a WGCNA module (all non-zero genes = "good genes").
absValue=0.001,
level1=0.05, # FDR threshold for plotting. Specify level1=1 to plot all GO categories containing genes exceeding the absValue.
level2=0.01, # FDR cutoff to print in regular (not italic) font.
level3=0.005, # FDR cutoff to print in large bold font.
txtsize=1, # decrease to fit more on one page, or increase (after rescaling the plot so the tree fits the text) for better "word cloud" effect
treeHeight=0.5, # height of the hierarchical clustering tree
colors=c("black") # these are the default colors; un-remark and change if needed
)
# Manually rescale the plot so the tree matches the text.
# If there are too many categories displayed, try making it more stringent
# with level1=0.01, level2=0.005, level3=0.001.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fun_recovery.R
\name{calcRecoveryTS}
\alias{calcRecoveryTS}
\title{Calculate recovery for a single time series}
\usage{
calcRecoveryTS(
tsi,
maxBreak,
obspyr,
inp = "segmented",
shortDenseTS = TRUE,
nPre = 2,
nDist = 12,
nPostMin = 4,
nPostMax = 6,
h = 0.15,
timeThres,
seas
)
}
\arguments{
\item{tsi}{vector: the first n values contain the timing of the disturbances and the next n values the observations for which the recovery indicators should be computed}
\item{maxBreak}{(only for recovery indicators derived after piecewise regression): if maxbreak is true, the maximum break in the segmented series is used as disturbance date to calculate the recovery indicators. If maxbreak is false, the break closest to the provided disturbance timing is used to calculate recovery.}
\item{obspyr}{the number of observations per year}
\item{inp}{the preprocessing applied to the time series before computing the recovery indicators: segmented (for piecewise regression), smooth (time series smoothing using loess), or raw (no preprocessing)}
\item{shortDenseTS}{In case FALSE, the metrics follow closely the definitions given by Frazier et al}
\item{nPre}{number of years prior to the disturbance that are used to derive the pre-disturbance condition}
\item{nDist}{number of observations used to derive the disturbance state}
\item{nPostMin}{start of the post-disturbance period: number of years after the disturbance}
\item{nPostMax}{end of the post-disturbance period: number of years after the disturbance}
\item{h}{h parameter of the breakpoints function in the strucchange package}
\item{timeThres}{only relevant for piecewise regression: threshold on the duration between the disturbance date and date of the detected break [years]}
\item{seas}{should a seasonal component be used in the piecewise regression?}
}
\value{
the RRI, R80P, YrYr and the slope of the post-disturbance segment
}
\description{
Calculate recovery for a single time series
}
|
/man/calcRecoveryTS.Rd
|
permissive
|
RETURN-project/UpscaleRecovery
|
R
| false
| true
| 2,114
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fun_recovery.R
\name{calcRecoveryTS}
\alias{calcRecoveryTS}
\title{Calculate recovery for a single time series}
\usage{
calcRecoveryTS(
tsi,
maxBreak,
obspyr,
inp = "segmented",
shortDenseTS = TRUE,
nPre = 2,
nDist = 12,
nPostMin = 4,
nPostMax = 6,
h = 0.15,
timeThres,
seas
)
}
\arguments{
\item{tsi}{vector: the first n values contain the timing of the disturbances and the next n values the observations for which the recovery indicators should be computed}
\item{maxBreak}{(only for recovery indicators derived after piecewise regression): if maxbreak is true, the maximum break in the segmented series is used as disturbance date to calculate the recovery indicators. If maxbreak is false, the break closest to the provided disturbance timing is used to calculate recovery.}
\item{obspyr}{the number of observations per year}
\item{inp}{the preprocessing applied to the time series before computing the recovery indicators: segmented (for piecewise regression), smooth (time series smoothing using loess), or raw (no preprocessing)}
\item{shortDenseTS}{In case FALSE, the metrics follow closely the definitions given by Frazier et al}
\item{nPre}{number of years prior to the disturbance that are used to derive the pre-disturbance condition}
\item{nDist}{number of observations used to derive the disturbance state}
\item{nPostMin}{start of the post-disturbance period: number of years after the disturbance}
\item{nPostMax}{end of the post-disturbance period: number of years after the disturbance}
\item{h}{h parameter of the breakpoints function in the strucchange package}
\item{timeThres}{only relevant for piecewise regression: threshold on the duration between the disturbance date and date of the detected break [years]}
\item{seas}{should a seasonal component be used in the piecewise regression?}
}
\value{
the RRI, R80P, YrYr and the slope of the post-disturbance segment
}
\description{
Calculate recovery for a single time series
}
|
# Simulate writing in a notebook page by page: prints one status line per
# page and a closing message once every page has been written.
# Demonstrates a `repeat` loop terminated by an explicit `break`.
WriteOnNoteBook <- function(total.page.count) {
  page <- 0
  repeat {
    page <- page + 1
    if (page > total.page.count) {
      # Ran past the last page: announce completion and leave the loop.
      print("Page finished")
      break
    }
    print(paste("writing on page number", page))
  }
}
WriteOnNoteBook(total.page.count = 10)
|
/RProgrammingFundamentals/07.R_Flow_Control/07RepeatWithBreak.R
|
no_license
|
samuraoka/RExamples
|
R
| false
| false
| 308
|
r
|
# Simulate writing in a notebook page by page: prints one status line per
# page and a closing message once every page has been written.
# Demonstrates a `repeat` loop terminated by an explicit `break`.
WriteOnNoteBook <- function(total.page.count) {
  page <- 0
  repeat {
    page <- page + 1
    if (page > total.page.count) {
      # Ran past the last page: announce completion and leave the loop.
      print("Page finished")
      break
    }
    print(paste("writing on page number", page))
  }
}
WriteOnNoteBook(total.page.count = 10)
|
# FetchData.R — download and cache COVID-19 open data for Japanese
# prefectures (CKAN portals, community data.json repositories, Google Sheets).
# NOTE(review): assumes DATA_PATH is defined by 01_Settings/Path.R and ends
# with a trailing "/" — confirm against that file.
library(rjson)
library(jsonlite)
library(data.table)
# library(gsheet)
source(file = "01_Settings/Path.R", local = T, encoding = "UTF-8")
source(file = "00_System/Generate.ProcessData.R", local = T, encoding = "UTF-8")
# ==== Kenmo data (archived one-off downloads, kept for reference) ====
# positiveDetail <- gsheet2tbl("docs.google.com/spreadsheets/d/1Cy4W9hYhGmABq1GuhLOkM92iYss0qy03Y1GeTv4bCyg/edit#gid=1196047345")
# fwrite(x = positiveDetail, file = paste0(DATA_PATH, "positiveDetail.csv"))
#
# provincePCR <- gsheet2tbl("docs.google.com/spreadsheets/d/1Cy4W9hYhGmABq1GuhLOkM92iYss0qy03Y1GeTv4bCyg/edit#gid=845297461")
# fwrite(x = provincePCR, file = paste0(DATA_PATH, "provincePCR.csv"))
# City-level data
# provinceAttr <- fread(paste0(DATA_PATH, "Signate/prefMaster.csv"))
#
# provinceAttr[都道府県コード %in% 1:7, regionName := "北海道・東北地方"]
# provinceAttr[都道府県コード %in% 8:14, regionName := "関東地方"]
# provinceAttr[都道府県コード %in% 15:23, regionName := "中部地方"]
# provinceAttr[都道府県コード %in% 24:30, regionName := "近畿地方"]
# provinceAttr[都道府県コード %in% 31:35, regionName := "中国地方"]
# provinceAttr[都道府県コード %in% 36:39, regionName := "四国地方"]
# provinceAttr[都道府県コード %in% 40:47, regionName := "九州地方・沖縄"]
#
# provinceAttr <- provinceAttr[, .(都道府県, regionName)]
# kenmoAreaDataset <- gsheet2tbl("https://docs.google.com/spreadsheets/d/1Cy4W9hYhGmABq1GuhLOkM92iYss0qy03Y1GeTv4bCyg/edit#gid=491635333")
# fwrite(x = kenmoAreaDataset, file = paste0(DATA_PATH, "Kenmo/confirmedNumberByCity.ja.csv"))
# # Translate
# translateSubData <- fread(paste0(DATA_PATH, "Collection/cityMaster.csv"))
# Replace the values of `column` in `data` with their translations.
# `language_data` is a lookup table whose "ja" column holds the original
# (Japanese) values and whose `language` column holds the translated ones.
# Returns the data as a data.table with that one column translated.
translateColumn <- function(data, column, language, language_data) {
  dt <- data.table(data)
  row_idx <- match(dt[[column]], language_data[["ja"]])
  dt[[column]] <- language_data[row_idx][[language]]
  dt
}
# kenmoAreaDataset.cn <- translateColumn(data = kenmoAreaDataset, column = "県名", language = "cn", language_data = translateSubData)
# kenmoAreaDataset.cn <- translateColumn(data = kenmoAreaDataset.cn, column = "市名", language = "cn", language_data = translateSubData)
# fwrite(x = kenmoAreaDataset.cn, file = paste0(DATA_PATH, "Kenmo/confirmedNumberByCity.cn.csv"))
# kenmoAreaDataset.en <- translateColumn(data = kenmoAreaDataset, column = "県名", language = "en", language_data = translateSubData)
# kenmoAreaDataset.en <- translateColumn(data = kenmoAreaDataset.en, column = "市名", language = "en", language_data = translateSubData)
# fwrite(x = kenmoAreaDataset.en, file = paste0(DATA_PATH, "Kenmo/confirmedNumberByCity.en.csv"))
# ====SIGNATEデータ====
# signatePlace <- gsheet2tbl('docs.google.com/spreadsheets/d/1CnQOf6eN18Kw5Q6ScE_9tFoyddk4FBwFZqZpt_tMOm4/edit#gid=103322372')
# fwrite(x = signatePlace, file = paste0(DATA_PATH, 'SIGNATE COVID-2019 Dataset - 接触場所マスタ.csv'))
# Re-download the SIGNATE COVID-19 patient-detail and relation spreadsheets
# and regenerate the derived datasets. Does nothing unless update = TRUE.
# NOTE(review): relies on gsheet2tbl() from the 'gsheet' package, whose
# library() call is commented out at the top of this file — confirm the
# package is attached before calling with update = TRUE.
Update.Signate.Detail <- function(update = F) {
  if (update) {
    signateDetail <- gsheet2tbl('https://docs.google.com/spreadsheets/d/10MFfRQTblbOpuvOs_yjIYgntpMGBg592dL8veXoPpp4/edit#gid=960903158')
    signateDetail <- data.table(signateDetail)
    fwrite(x = signateDetail, file = paste0(DATA_PATH, 'SIGNATE COVID-2019 Dataset - 罹患者.csv'))
    signateRelation <- gsheet2tbl('https://docs.google.com/spreadsheets/d/1NQ3xrnRi6ta82QtitpJFmIYGvO0wZBmBU5H9EfUGtts/edit#gid=1227116169')
    signateRelation <- data.table(signateRelation)
    fwrite(x = signateRelation, file = paste0(DATA_PATH, 'Signate/relation.csv'))
    # Prefecture, announcement date, sex, age ====
    source(file = "02_Utils/ConfirmedPyramidData.R")
    fwrite(x = Signate.ConfirmedPyramidData(signateDetail), file = paste0(DATA_PATH, "Generated/genderAgeData.csv"))
    # Map of days from symptom onset to diagnosis
    source(file = paste0(DATA_PATH, "Academic/onset2ConfirmedMap.R"))
  }
}
# Update.Signate.Detail(update = T)
#
# signateLink <- gsheet2tbl('https://docs.google.com/spreadsheets/d/1CnQOf6eN18Kw5Q6ScE_9tFoyddk4FBwFZqZpt_tMOm4/edit#gid=57719256')
# fwrite(x = signateLink, file = paste0(DATA_PATH, 'SIGNATE COVID-2019 Dataset - 罹患者関係.csv'))
# signatePref <- gsheet2tbl('https://docs.google.com/spreadsheets/d/1NQjppYx0QZQmt6706gCOw9DcIDxgnaEy9QTzfeqeMrQ/edit#gid=1940307536')
# fwrite(x = signatePref, file = paste0(DATA_PATH, 'Signate/', 'prefMaster.csv'))
# signateDetail<- fread(paste0(DATA_PATH, 'SIGNATE COVID-2019 Dataset - 罹患者.csv'), header = T)
# signateLink<- fread(paste0(DATA_PATH, 'SIGNATE COVID-2019 Dataset - 罹患者関係.csv'), header = T)
# signatePlace<- fread(paste0(DATA_PATH, 'SIGNATE COVID-2019 Dataset - 接触場所マスタ.csv'), header = T)
# Download every resource listed in `jsonResult` (a data.frame of CKAN
# resources with `download_url` and `filename` columns, Shift-JIS encoded
# CSVs) and save each one under DATA_PATH/Pref/<pref>/.
#
# jsonResult:       resource table returned by a CKAN package_show API call
# patientsFileName: pattern identifying the per-patient CSV among resources
# prefCode:         prefecture code (kept for the commented-out SIGNATE merge)
# pref:             directory name of the prefecture under Pref/
# NoCol:            patient-number column name (for the SIGNATE merge)
saveFileFromApi <- function(jsonResult, patientsFileName, prefCode, pref, NoCol = "No") {
  data <- list()
  # seq_len() instead of 1:nrow() so an empty resource list means zero
  # iterations rather than indexing rows 1 and 0.
  for (i in seq_len(nrow(jsonResult))) {
    data[[i]] <- read.csv(file(jsonResult[i, ]$download_url, encoding = "shift-jis"))
    print(jsonResult[i, ]$filename)
    if (grepl(patientsFileName, jsonResult[i, ]$filename)) {
      patient <- data.table(data[[i]])
      print("マージデータ...")
      # mergeWithSignate <- merge(patient, signateDetail[都道府県コード == prefCode], by.x = NoCol, by.y = '都道府県別罹患者No')
      # "Pref/" without a leading slash, matching the else branch and every
      # other DATA_PATH usage in this file (DATA_PATH ends with "/").
      fwrite(x = patient, file = paste0(DATA_PATH, "Pref/", pref, "/", jsonResult[i, ]$filename))
    } else {
      fwrite(x = data[[i]], file = paste0(DATA_PATH, "Pref/", pref, "/", jsonResult[i, ]$filename))
    }
  }
}
# ==== Hokkaido ====
# Fetch the CKAN resource list for the Hokkaido open-data package and save
# every listed CSV under Pref/Hokkaido/ ("patients.csv" is the patient file).
apiUrl <- "https://www.harp.lg.jp/opendata/api/package_show?id=752c577e-0cbe-46e0-bebd-eb47b71b38bf"
jsonFile <- jsonlite::fromJSON(apiUrl)
jsonResult <- jsonFile$result$resources
saveFileFromApi(jsonResult, "patients.csv", 1, "Hokkaido")
# ==== Aomori ====
apiUrl <- "https://opendata.pref.aomori.lg.jp/api/package_show?id=5e4612ce-1636-41d9-82a3-c5130a79ffe0"
jsonFile <- jsonlite::fromJSON(apiUrl)
jsonResult <- jsonFile$result$resources
# Remove previously downloaded Aomori files before saving fresh copies.
sapply(paste0(DATA_PATH, "Pref/Aomori/", list.files(path = paste0(DATA_PATH, "Pref/Aomori"))), file.remove)
saveFileFromApi(jsonResult, "陽性患者関係.csv", 2, "Aomori", "NO")
# ==== Iwate ====
# Build a daily summary (tests, contact-line calls, general consultations)
# from the community-maintained data.json, then add cumulative columns.
dataUrl <- "https://raw.githubusercontent.com/MeditationDuck/covid19/development/data/data.json"
jsonFile <- jsonlite::fromJSON(dataUrl)
pcr <- data.table(
  date = as.Date(jsonFile$inspections_summary$labels, "%m/%d"),
  検査数 = jsonFile$inspections_summary$data$県内
)
contact <- data.table(
  date = as.Date(jsonFile$contacts$data$日付),
  相談件数 = jsonFile$contacts$data$小計
)
querent <- data.table(
  date = as.Date(jsonFile$querents$data$日付),
  一般相談 = jsonFile$querents$data$小計
)
iwateData <- merge(x = pcr, y = contact, by = "date", no.dups = T, all = T)
iwateData <- merge(x = iwateData, y = querent, by = "date", no.dups = T, all = T)
iwateData[is.na(iwateData)] <- 0
iwateData[, 検査数累計 := cumsum(検査数)]
iwateData[, 相談件数累計 := cumsum(相談件数)]
iwateData[, 一般相談累計 := cumsum(一般相談)]
fwrite(x = iwateData, file = paste0(DATA_PATH, "Pref/", "Iwate", "/", "summary.csv"))
# ==== Miyagi ====
# Same pattern as Iwate: tests, contacts and daily positives merged by date.
dataUrl <- "https://raw.githubusercontent.com/code4shiogama/covid19-miyagi/development/data/data.json"
jsonFile <- jsonlite::fromJSON(dataUrl)
pcr <- data.table(
  date = as.Date(jsonFile$inspection_persons$labels),
  検査数 = jsonFile$inspection_persons$datasets$data[[1]]
)
contact <- data.table(
  date = as.Date(jsonFile$contacts$data$日付),
  相談件数 = jsonFile$contacts$data$小計
)
positive <- data.table(
  date = as.Date(jsonFile$patients_summary$data$日付),
  陽性数 = jsonFile$patients_summary$data$小計
)
miyagiData <- merge(x = pcr, y = contact, by = "date", no.dups = T, all = T)
miyagiData <- merge(x = miyagiData, y = positive, by = "date", no.dups = T, all = T)
miyagiData[is.na(miyagiData)] <- 0
miyagiData[, 検査数累計 := cumsum(検査数)]
miyagiData[, 相談件数累計 := cumsum(相談件数)]
miyagiData[, 陽性数累計 := cumsum(陽性数)]
fwrite(x = miyagiData, file = paste0(DATA_PATH, "Pref/", "Miyagi", "/", "summary.csv"))
# ==== Ibaraki ====
# Same pattern again, but the cumulative ("累計") columns are added
# generically for every non-date column at once.
dataUrl <- "https://raw.githubusercontent.com/a01sa01to/covid19-ibaraki/development/data/data.json"
jsonFile <- jsonlite::fromJSON(dataUrl)
pcr <- data.table(
  date = as.Date(jsonFile$inspection_persons$labels),
  検査数 = jsonFile$inspection_persons$datasets$data[[1]]
)
contact <- data.table(
  date = as.Date(jsonFile$contacts$data$date),
  相談件数 = jsonFile$contacts$data$total
)
positive <- data.table(
  date = as.Date(jsonFile$patients_summary$data$date),
  陽性数 = jsonFile$patients_summary$data$total
)
dt <- merge(x = pcr, y = contact, by = "date", no.dups = T, all = T)
dt <- merge(x = dt, y = positive, by = "date", no.dups = T, all = T)
dt[is.na(dt)] <- 0
dt[, paste0(colnames(dt)[2:ncol(dt)], "累計") := lapply(.SD, cumsum), .SDcols = c(2:ncol(dt))]
fwrite(x = dt, file = paste0(DATA_PATH, "Pref/", "Ibaraki", "/", "summary.csv"))
# ==== Akita (disabled) ====
# dataUrl <- 'https://raw.githubusercontent.com/asaba-zauberer/covid19-akita/development/data/data.json'
# jsonFile <- fromJSON(dataUrl)
# pcr <- data.table(date = as.Date(jsonFile$inspections_summary$labels, '%m/%d'),
# dailyCheck = jsonFile$inspections_summary$data$県内)
# ==== Kanagawa ====
# The prefecture publishes CSVs directly (CP932 encoded). Positives are
# tabulated from the per-patient file by announcement date and sex.
contact <- data.table(read.csv("http://www.pref.kanagawa.jp/osirase/1369/data/csv/contacts.csv", fileEncoding = "cp932"))
contact[, 専用ダイヤル累計 := cumsum(合計)]
querent <- data.table(read.csv("http://www.pref.kanagawa.jp/osirase/1369/data/csv/querent.csv", fileEncoding = "cp932"))
querent[, 相談対応件数累計 := cumsum(相談対応件数)]
patient <- data.table(read.csv("http://www.pref.kanagawa.jp/osirase/1369/data/csv/patient.csv", fileEncoding = "cp932"))
patient$性別 <- as.character(patient$性別)
# patient[性別 == '', 性別 := '調査中']
patient[性別 == "−", 性別 := "非公表"]
# Cross-tabulate announcement date x sex; dates become the "rn" column.
patientSummary <- data.table(as.data.frame.matrix(table(patient$発表日, patient$性別)), keep.rownames = T)
dt <- merge(x = contact, y = querent, by.x = "日付", by.y = "日付", all.x = T, no.dups = T)
dt <- merge(x = dt, y = patientSummary, by.x = "日付", by.y = "rn", no.dups = T, all = T)
dt[is.na(dt)] <- 0
dt[, 陽性数 := rowSums(.SD), .SDcols = unique(patient$性別)]
dt[, 累積陽性数 := cumsum(.SD), .SDcols = c("陽性数")]
fwrite(x = dt, file = paste0(DATA_PATH, "Pref/Kanagawa/summary.csv"))
# ==== Fukuoka ====
# Plain CSV downloads from the prefecture's CKAN portal.
patientUrl <- "https://ckan.open-governmentdata.org/dataset/8a9688c2-7b9f-4347-ad6e-de3b339ef740/resource/c27769a2-8634-47aa-9714-7e21c4038dd4/download/400009_pref_fukuoka_covid19_patients.csv"
patient <- read.csv(file(patientUrl))
fwrite(x = data.table(patient), file = paste0(DATA_PATH, "Pref/Fukuoka/patients.csv"))
testUrl <- "https://ckan.open-governmentdata.org/dataset/ef64c68a-d89e-4b1b-a53f-d2535ebfa3a1/resource/aab43191-40d0-4a6a-9724-a9030a596009/download/400009_pref_fukuoka_covid19_exam.csv"
test <- read.csv(file(testUrl))
fwrite(x = data.table(test), file = paste0(DATA_PATH, "Pref/Fukuoka/test.csv"))
contactUrl <- "https://ckan.open-governmentdata.org/dataset/f08d93ce-119a-4e0f-bd23-2a5f00d1d944/resource/a69a2ac1-349b-4f43-a554-f95c07010528/download/400009_pref_fukuoka_covid19_kikokusyasessyokusya.csv"
contact <- read.csv(file(contactUrl))
fwrite(x = data.table(contact), file = paste0(DATA_PATH, "Pref/Fukuoka/call.csv"))
# ==== Oita (not implemented) ====
# ==== Okinawa (disabled) ====
# jsonUrl <- 'https://raw.githubusercontent.com/Code-for-OKINAWA/covid19/development/data/data.json'
# jsonFile <- fromJSON(jsonUrl)
# jsonFile$patients
# test <- signateDetail[都道府県コード == 47]
|
/00_System/FetchData.R
|
permissive
|
linc-student/2019-ncov-japan
|
R
| false
| false
| 11,679
|
r
|
# FetchData.R — download and cache COVID-19 open data for Japanese
# prefectures (CKAN portals, community data.json repositories, Google Sheets).
# NOTE(review): assumes DATA_PATH is defined by 01_Settings/Path.R and ends
# with a trailing "/" — confirm against that file.
library(rjson)
library(jsonlite)
library(data.table)
# library(gsheet)
source(file = "01_Settings/Path.R", local = T, encoding = "UTF-8")
source(file = "00_System/Generate.ProcessData.R", local = T, encoding = "UTF-8")
# ==== Kenmo data (archived one-off downloads, kept for reference) ====
# positiveDetail <- gsheet2tbl("docs.google.com/spreadsheets/d/1Cy4W9hYhGmABq1GuhLOkM92iYss0qy03Y1GeTv4bCyg/edit#gid=1196047345")
# fwrite(x = positiveDetail, file = paste0(DATA_PATH, "positiveDetail.csv"))
#
# provincePCR <- gsheet2tbl("docs.google.com/spreadsheets/d/1Cy4W9hYhGmABq1GuhLOkM92iYss0qy03Y1GeTv4bCyg/edit#gid=845297461")
# fwrite(x = provincePCR, file = paste0(DATA_PATH, "provincePCR.csv"))
# City-level data
# provinceAttr <- fread(paste0(DATA_PATH, "Signate/prefMaster.csv"))
#
# provinceAttr[都道府県コード %in% 1:7, regionName := "北海道・東北地方"]
# provinceAttr[都道府県コード %in% 8:14, regionName := "関東地方"]
# provinceAttr[都道府県コード %in% 15:23, regionName := "中部地方"]
# provinceAttr[都道府県コード %in% 24:30, regionName := "近畿地方"]
# provinceAttr[都道府県コード %in% 31:35, regionName := "中国地方"]
# provinceAttr[都道府県コード %in% 36:39, regionName := "四国地方"]
# provinceAttr[都道府県コード %in% 40:47, regionName := "九州地方・沖縄"]
#
# provinceAttr <- provinceAttr[, .(都道府県, regionName)]
# kenmoAreaDataset <- gsheet2tbl("https://docs.google.com/spreadsheets/d/1Cy4W9hYhGmABq1GuhLOkM92iYss0qy03Y1GeTv4bCyg/edit#gid=491635333")
# fwrite(x = kenmoAreaDataset, file = paste0(DATA_PATH, "Kenmo/confirmedNumberByCity.ja.csv"))
# # Translate
# translateSubData <- fread(paste0(DATA_PATH, "Collection/cityMaster.csv"))
# Replace the values of `column` in `data` with their translations.
# `language_data` is a lookup table whose "ja" column holds the original
# (Japanese) values and whose `language` column holds the translated ones.
# Returns the data as a data.table with that one column translated.
translateColumn <- function(data, column, language, language_data) {
  dt <- data.table(data)
  row_idx <- match(dt[[column]], language_data[["ja"]])
  dt[[column]] <- language_data[row_idx][[language]]
  dt
}
# kenmoAreaDataset.cn <- translateColumn(data = kenmoAreaDataset, column = "県名", language = "cn", language_data = translateSubData)
# kenmoAreaDataset.cn <- translateColumn(data = kenmoAreaDataset.cn, column = "市名", language = "cn", language_data = translateSubData)
# fwrite(x = kenmoAreaDataset.cn, file = paste0(DATA_PATH, "Kenmo/confirmedNumberByCity.cn.csv"))
# kenmoAreaDataset.en <- translateColumn(data = kenmoAreaDataset, column = "県名", language = "en", language_data = translateSubData)
# kenmoAreaDataset.en <- translateColumn(data = kenmoAreaDataset.en, column = "市名", language = "en", language_data = translateSubData)
# fwrite(x = kenmoAreaDataset.en, file = paste0(DATA_PATH, "Kenmo/confirmedNumberByCity.en.csv"))
# ====SIGNATEデータ====
# signatePlace <- gsheet2tbl('docs.google.com/spreadsheets/d/1CnQOf6eN18Kw5Q6ScE_9tFoyddk4FBwFZqZpt_tMOm4/edit#gid=103322372')
# fwrite(x = signatePlace, file = paste0(DATA_PATH, 'SIGNATE COVID-2019 Dataset - 接触場所マスタ.csv'))
# Re-download the SIGNATE COVID-19 patient-detail and relation spreadsheets
# and regenerate the derived datasets. Does nothing unless update = TRUE.
# NOTE(review): relies on gsheet2tbl() from the 'gsheet' package, whose
# library() call is commented out at the top of this file — confirm the
# package is attached before calling with update = TRUE.
Update.Signate.Detail <- function(update = F) {
  if (update) {
    signateDetail <- gsheet2tbl('https://docs.google.com/spreadsheets/d/10MFfRQTblbOpuvOs_yjIYgntpMGBg592dL8veXoPpp4/edit#gid=960903158')
    signateDetail <- data.table(signateDetail)
    fwrite(x = signateDetail, file = paste0(DATA_PATH, 'SIGNATE COVID-2019 Dataset - 罹患者.csv'))
    signateRelation <- gsheet2tbl('https://docs.google.com/spreadsheets/d/1NQ3xrnRi6ta82QtitpJFmIYGvO0wZBmBU5H9EfUGtts/edit#gid=1227116169')
    signateRelation <- data.table(signateRelation)
    fwrite(x = signateRelation, file = paste0(DATA_PATH, 'Signate/relation.csv'))
    # Prefecture, announcement date, sex, age ====
    source(file = "02_Utils/ConfirmedPyramidData.R")
    fwrite(x = Signate.ConfirmedPyramidData(signateDetail), file = paste0(DATA_PATH, "Generated/genderAgeData.csv"))
    # Map of days from symptom onset to diagnosis
    source(file = paste0(DATA_PATH, "Academic/onset2ConfirmedMap.R"))
  }
}
# Update.Signate.Detail(update = T)
#
# signateLink <- gsheet2tbl('https://docs.google.com/spreadsheets/d/1CnQOf6eN18Kw5Q6ScE_9tFoyddk4FBwFZqZpt_tMOm4/edit#gid=57719256')
# fwrite(x = signateLink, file = paste0(DATA_PATH, 'SIGNATE COVID-2019 Dataset - 罹患者関係.csv'))
# signatePref <- gsheet2tbl('https://docs.google.com/spreadsheets/d/1NQjppYx0QZQmt6706gCOw9DcIDxgnaEy9QTzfeqeMrQ/edit#gid=1940307536')
# fwrite(x = signatePref, file = paste0(DATA_PATH, 'Signate/', 'prefMaster.csv'))
# signateDetail<- fread(paste0(DATA_PATH, 'SIGNATE COVID-2019 Dataset - 罹患者.csv'), header = T)
# signateLink<- fread(paste0(DATA_PATH, 'SIGNATE COVID-2019 Dataset - 罹患者関係.csv'), header = T)
# signatePlace<- fread(paste0(DATA_PATH, 'SIGNATE COVID-2019 Dataset - 接触場所マスタ.csv'), header = T)
# Download every resource listed in `jsonResult` (a data.frame of CKAN
# resources with `download_url` and `filename` columns, Shift-JIS encoded
# CSVs) and save each one under DATA_PATH/Pref/<pref>/.
#
# jsonResult:       resource table returned by a CKAN package_show API call
# patientsFileName: pattern identifying the per-patient CSV among resources
# prefCode:         prefecture code (kept for the commented-out SIGNATE merge)
# pref:             directory name of the prefecture under Pref/
# NoCol:            patient-number column name (for the SIGNATE merge)
saveFileFromApi <- function(jsonResult, patientsFileName, prefCode, pref, NoCol = "No") {
  data <- list()
  # seq_len() instead of 1:nrow() so an empty resource list means zero
  # iterations rather than indexing rows 1 and 0.
  for (i in seq_len(nrow(jsonResult))) {
    data[[i]] <- read.csv(file(jsonResult[i, ]$download_url, encoding = "shift-jis"))
    print(jsonResult[i, ]$filename)
    if (grepl(patientsFileName, jsonResult[i, ]$filename)) {
      patient <- data.table(data[[i]])
      print("マージデータ...")
      # mergeWithSignate <- merge(patient, signateDetail[都道府県コード == prefCode], by.x = NoCol, by.y = '都道府県別罹患者No')
      # "Pref/" without a leading slash, matching the else branch and every
      # other DATA_PATH usage in this file (DATA_PATH ends with "/").
      fwrite(x = patient, file = paste0(DATA_PATH, "Pref/", pref, "/", jsonResult[i, ]$filename))
    } else {
      fwrite(x = data[[i]], file = paste0(DATA_PATH, "Pref/", pref, "/", jsonResult[i, ]$filename))
    }
  }
}
# ==== Hokkaido ====
# Fetch the CKAN resource list for the Hokkaido open-data package and save
# every listed CSV under Pref/Hokkaido/ ("patients.csv" is the patient file).
apiUrl <- "https://www.harp.lg.jp/opendata/api/package_show?id=752c577e-0cbe-46e0-bebd-eb47b71b38bf"
jsonFile <- jsonlite::fromJSON(apiUrl)
jsonResult <- jsonFile$result$resources
saveFileFromApi(jsonResult, "patients.csv", 1, "Hokkaido")
# ==== Aomori ====
apiUrl <- "https://opendata.pref.aomori.lg.jp/api/package_show?id=5e4612ce-1636-41d9-82a3-c5130a79ffe0"
jsonFile <- jsonlite::fromJSON(apiUrl)
jsonResult <- jsonFile$result$resources
# Remove previously downloaded Aomori files before saving fresh copies.
sapply(paste0(DATA_PATH, "Pref/Aomori/", list.files(path = paste0(DATA_PATH, "Pref/Aomori"))), file.remove)
saveFileFromApi(jsonResult, "陽性患者関係.csv", 2, "Aomori", "NO")
# ==== Iwate ====
# Build a daily summary (tests, contact-line calls, general consultations)
# from the community-maintained data.json, then add cumulative columns.
dataUrl <- "https://raw.githubusercontent.com/MeditationDuck/covid19/development/data/data.json"
jsonFile <- jsonlite::fromJSON(dataUrl)
pcr <- data.table(
  date = as.Date(jsonFile$inspections_summary$labels, "%m/%d"),
  検査数 = jsonFile$inspections_summary$data$県内
)
contact <- data.table(
  date = as.Date(jsonFile$contacts$data$日付),
  相談件数 = jsonFile$contacts$data$小計
)
querent <- data.table(
  date = as.Date(jsonFile$querents$data$日付),
  一般相談 = jsonFile$querents$data$小計
)
iwateData <- merge(x = pcr, y = contact, by = "date", no.dups = T, all = T)
iwateData <- merge(x = iwateData, y = querent, by = "date", no.dups = T, all = T)
iwateData[is.na(iwateData)] <- 0
iwateData[, 検査数累計 := cumsum(検査数)]
iwateData[, 相談件数累計 := cumsum(相談件数)]
iwateData[, 一般相談累計 := cumsum(一般相談)]
fwrite(x = iwateData, file = paste0(DATA_PATH, "Pref/", "Iwate", "/", "summary.csv"))
# ==== Miyagi ====
# Same pattern as Iwate: tests, contacts and daily positives merged by date.
dataUrl <- "https://raw.githubusercontent.com/code4shiogama/covid19-miyagi/development/data/data.json"
jsonFile <- jsonlite::fromJSON(dataUrl)
pcr <- data.table(
  date = as.Date(jsonFile$inspection_persons$labels),
  検査数 = jsonFile$inspection_persons$datasets$data[[1]]
)
contact <- data.table(
  date = as.Date(jsonFile$contacts$data$日付),
  相談件数 = jsonFile$contacts$data$小計
)
positive <- data.table(
  date = as.Date(jsonFile$patients_summary$data$日付),
  陽性数 = jsonFile$patients_summary$data$小計
)
miyagiData <- merge(x = pcr, y = contact, by = "date", no.dups = T, all = T)
miyagiData <- merge(x = miyagiData, y = positive, by = "date", no.dups = T, all = T)
miyagiData[is.na(miyagiData)] <- 0
miyagiData[, 検査数累計 := cumsum(検査数)]
miyagiData[, 相談件数累計 := cumsum(相談件数)]
miyagiData[, 陽性数累計 := cumsum(陽性数)]
fwrite(x = miyagiData, file = paste0(DATA_PATH, "Pref/", "Miyagi", "/", "summary.csv"))
# ==== Ibaraki ====
# Same pattern again, but the cumulative ("累計") columns are added
# generically for every non-date column at once.
dataUrl <- "https://raw.githubusercontent.com/a01sa01to/covid19-ibaraki/development/data/data.json"
jsonFile <- jsonlite::fromJSON(dataUrl)
pcr <- data.table(
  date = as.Date(jsonFile$inspection_persons$labels),
  検査数 = jsonFile$inspection_persons$datasets$data[[1]]
)
contact <- data.table(
  date = as.Date(jsonFile$contacts$data$date),
  相談件数 = jsonFile$contacts$data$total
)
positive <- data.table(
  date = as.Date(jsonFile$patients_summary$data$date),
  陽性数 = jsonFile$patients_summary$data$total
)
dt <- merge(x = pcr, y = contact, by = "date", no.dups = T, all = T)
dt <- merge(x = dt, y = positive, by = "date", no.dups = T, all = T)
dt[is.na(dt)] <- 0
dt[, paste0(colnames(dt)[2:ncol(dt)], "累計") := lapply(.SD, cumsum), .SDcols = c(2:ncol(dt))]
fwrite(x = dt, file = paste0(DATA_PATH, "Pref/", "Ibaraki", "/", "summary.csv"))
# ==== Akita (disabled) ====
# dataUrl <- 'https://raw.githubusercontent.com/asaba-zauberer/covid19-akita/development/data/data.json'
# jsonFile <- fromJSON(dataUrl)
# pcr <- data.table(date = as.Date(jsonFile$inspections_summary$labels, '%m/%d'),
# dailyCheck = jsonFile$inspections_summary$data$県内)
# ==== Kanagawa ====
# The prefecture publishes CSVs directly (CP932 encoded). Positives are
# tabulated from the per-patient file by announcement date and sex.
contact <- data.table(read.csv("http://www.pref.kanagawa.jp/osirase/1369/data/csv/contacts.csv", fileEncoding = "cp932"))
contact[, 専用ダイヤル累計 := cumsum(合計)]
querent <- data.table(read.csv("http://www.pref.kanagawa.jp/osirase/1369/data/csv/querent.csv", fileEncoding = "cp932"))
querent[, 相談対応件数累計 := cumsum(相談対応件数)]
patient <- data.table(read.csv("http://www.pref.kanagawa.jp/osirase/1369/data/csv/patient.csv", fileEncoding = "cp932"))
patient$性別 <- as.character(patient$性別)
# patient[性別 == '', 性別 := '調査中']
patient[性別 == "−", 性別 := "非公表"]
# Cross-tabulate announcement date x sex; dates become the "rn" column.
patientSummary <- data.table(as.data.frame.matrix(table(patient$発表日, patient$性別)), keep.rownames = T)
dt <- merge(x = contact, y = querent, by.x = "日付", by.y = "日付", all.x = T, no.dups = T)
dt <- merge(x = dt, y = patientSummary, by.x = "日付", by.y = "rn", no.dups = T, all = T)
dt[is.na(dt)] <- 0
dt[, 陽性数 := rowSums(.SD), .SDcols = unique(patient$性別)]
dt[, 累積陽性数 := cumsum(.SD), .SDcols = c("陽性数")]
fwrite(x = dt, file = paste0(DATA_PATH, "Pref/Kanagawa/summary.csv"))
# ==== Fukuoka ====
# Plain CSV downloads from the prefecture's CKAN portal.
patientUrl <- "https://ckan.open-governmentdata.org/dataset/8a9688c2-7b9f-4347-ad6e-de3b339ef740/resource/c27769a2-8634-47aa-9714-7e21c4038dd4/download/400009_pref_fukuoka_covid19_patients.csv"
patient <- read.csv(file(patientUrl))
fwrite(x = data.table(patient), file = paste0(DATA_PATH, "Pref/Fukuoka/patients.csv"))
testUrl <- "https://ckan.open-governmentdata.org/dataset/ef64c68a-d89e-4b1b-a53f-d2535ebfa3a1/resource/aab43191-40d0-4a6a-9724-a9030a596009/download/400009_pref_fukuoka_covid19_exam.csv"
test <- read.csv(file(testUrl))
fwrite(x = data.table(test), file = paste0(DATA_PATH, "Pref/Fukuoka/test.csv"))
contactUrl <- "https://ckan.open-governmentdata.org/dataset/f08d93ce-119a-4e0f-bd23-2a5f00d1d944/resource/a69a2ac1-349b-4f43-a554-f95c07010528/download/400009_pref_fukuoka_covid19_kikokusyasessyokusya.csv"
contact <- read.csv(file(contactUrl))
fwrite(x = data.table(contact), file = paste0(DATA_PATH, "Pref/Fukuoka/call.csv"))
# ==== Oita (not implemented) ====
# ==== Okinawa (disabled) ====
# jsonUrl <- 'https://raw.githubusercontent.com/Code-for-OKINAWA/covid19/development/data/data.json'
# jsonFile <- fromJSON(jsonUrl)
# jsonFile$patients
# test <- signateDetail[都道府県コード == 47]
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_fstats.R
\name{plot_fstats}
\alias{plot_fstats}
\title{Plot F2, F3, F3star, F4, D or pairwise Fst values with their Confidence Intervals}
\usage{
plot_fstats(
x,
stat.name = "F2",
ci.perc = 95,
value.range = c(NA, NA),
pop.sel = NA,
pop.f3.target = NA,
highlight.signif = TRUE,
main = stat.name,
...
)
}
\arguments{
\item{x}{An object of class fstats (to plot F2, F3 or F4 statistics) or pairwisefst (to plot pairwise fst)}
\item{stat.name}{For fstats object, the name of the stat (either F2, F3, F3star, F4 or Dstat)}
\item{ci.perc}{Percentage of the Confidence Interval in number of standard errors (default=95\%)}
\item{value.range}{Range of test values (x-axis) to be plotted (default=NA,NA: i.e., all test values are plotted)}
\item{pop.sel}{Only plot test values involving these populations (default=NA: i.e., all test values are plotted)}
\item{pop.f3.target}{For F3-statistics, only plot F3 involving pop.f3.target as a target}
\item{highlight.signif}{If TRUE highlight significant tests in red (see details)}
\item{main}{Main title of the plot (default=stat.name)}
\item{...}{Some other graphical arguments to be passed}
}
\value{
A plot of the Fstats of interest. Significant F3 statistics (i.e., showing formal evidence for admixture of the target population) are highlighted in red. Significant F4 statistics (i.e., showing formal evidence against treeness of the pop. quadruplet) are highlighted in red.
}
\description{
Plot F2, F3, F3star, F4, D or pairwise Fst values with their Confidence Intervals
}
\details{
Data will only be plotted if jackknife estimates of the estimator s.e. have been performed i.e. if the functions compute.fstats or compute.pairwiseFST were run with nsnp.per.block>0
}
\examples{
make.example.files(writing.dir=tempdir())
pooldata=popsync2pooldata(sync.file=paste0(tempdir(),"/ex.sync.gz"),
poolsizes=rep(50,15),poolnames=paste0("P",1:15))
res.fstats=compute.fstats(pooldata,nsnp.per.bjack.block=25)
plot_fstats(res.fstats,stat.name="F3",cex=0.5)
plot_fstats(res.fstats,stat.name="F3",value.range=c(NA,0.001),
pop.f3.target=c("P7","P5"),cex.axis=0.7)
plot_fstats(res.fstats,stat.name="F4",cex=0.5)
#allow to reduce the size of the test name (y-axis)
plot_fstats(res.fstats,stat.name="F4",cex=0.5,
pop.sel=c("P1","P2","P3","P4","P5"))
plot_fstats(res.fstats,stat.name="F4",cex=0.5,
pop.sel=c("P1","P2","P3","P4","P5"),highlight.signif=FALSE)
}
\seealso{
To generate x object, see \code{\link{compute.pairwiseFST}} (for pairwisefst object) or \code{\link{compute.fstats}} (for fstats object)
}
|
/man/plot_fstats.Rd
|
no_license
|
cran/poolfstat
|
R
| false
| true
| 2,715
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_fstats.R
\name{plot_fstats}
\alias{plot_fstats}
\title{Plot F2, F3, F3star, F4, D or pairwise Fst values with their Confidence Intervals}
\usage{
plot_fstats(
x,
stat.name = "F2",
ci.perc = 95,
value.range = c(NA, NA),
pop.sel = NA,
pop.f3.target = NA,
highlight.signif = TRUE,
main = stat.name,
...
)
}
\arguments{
\item{x}{An object of class fstats (to plot F2, F3 or F4 statistics) or pairwisefst (to plot pairwise fst)}
\item{stat.name}{For fstats object, the name of the stat (either F2, F3, F3star, F4 or Dstat)}
\item{ci.perc}{Percentage of the Confidence Interval in number of standard errors (default=95\%)}
\item{value.range}{Range of test values (x-axis) to be plotted (default=NA,NA: i.e., all test values are plotted)}
\item{pop.sel}{Only plot test values involving these populations (default=NA: i.e., all test values are plotted)}
\item{pop.f3.target}{For F3-statistics, only plot F3 involving pop.f3.target as a target}
\item{highlight.signif}{If TRUE highlight significant tests in red (see details)}
\item{main}{Main title of the plot (default=stat.name)}
\item{...}{Some other graphical arguments to be passed}
}
\value{
A plot of the Fstats of interest. Significant F3 statistics (i.e., showing formal evidence for admixture of the target population) are highlighted in red. Significant F4 statistics (i.e., showing formal evidence against treeness of the pop. quadruplet) are highlighted in red.
}
\description{
Plot F2, F3, F3star, F4, D or pairwise Fst values with their Confidence Intervals
}
\details{
Data will only be plotted if jackknife estimates of the estimator s.e. have been performed, i.e., if the functions compute.fstats or compute.pairwiseFST were run with nsnp.per.bjack.block>0
}
\examples{
make.example.files(writing.dir=tempdir())
pooldata=popsync2pooldata(sync.file=paste0(tempdir(),"/ex.sync.gz"),
poolsizes=rep(50,15),poolnames=paste0("P",1:15))
res.fstats=compute.fstats(pooldata,nsnp.per.bjack.block=25)
plot_fstats(res.fstats,stat.name="F3",cex=0.5)
plot_fstats(res.fstats,stat.name="F3",value.range=c(NA,0.001),
pop.f3.target=c("P7","P5"),cex.axis=0.7)
plot_fstats(res.fstats,stat.name="F4",cex=0.5)
#allow to reduce the size of the test name (y-axis)
plot_fstats(res.fstats,stat.name="F4",cex=0.5,
pop.sel=c("P1","P2","P3","P4","P5"))
plot_fstats(res.fstats,stat.name="F4",cex=0.5,
pop.sel=c("P1","P2","P3","P4","P5"),highlight.signif=FALSE)
}
\seealso{
To generate x object, see \code{\link{compute.pairwiseFST}} (for pairwisefst object) or \code{\link{compute.fstats}} (for fstats object)
}
|
# Introductory R tutorial script: vectors, type coercion, dates, logicals,
# indexing, factors and matrices. Each bare expression is meant to be run
# interactively so its printed value can be inspected.
x<-c(1,2,3,4,5)
x
# Character vector; the last three elements are Korean syllables.
x <- c("A","B","C","가","나","다")
x
class(x)
x<-c("1","2","3")
x
# A date written as a plain string is just character data...
x <- "2018-01-18"
class(x)
# ...while as.Date() produces a real Date object.
x <- as.Date("2020-01-18")
x
class(x)
y <- as.Date("2019-01-18")
y
class(y)
x-y
x <- TRUE
y <- FALSE
class(x)
class(y)
# Alt+Enter = run the current line (RStudio shortcut)
# Alt+ - = inserts the assignment operator <-
# 1-dimensional - vector / list
# 2-dimensional - matrix / data frame
# 3-dimensional -
# data wrangling - preprocessing
# Mixing numbers and characters coerces every element to character.
x <- c(1,2,"a",4)
x
x[2] # R indexing starts at 1.
x[3]
x[c(2,3)]
x[2:3]
x[-1] # negative index: drop the element at that position, keep the rest
x[x=='a']
x <- factor(c("M","F","F","M")) # assign factor-structured data to a variable
x
class(x)
levels(x) # inspect the categories (levels)
levels(x) <- c("A","B") # replace the F,M labels with A,B
x
x[1]
x[1:3]
# matrix: a 1-dimensional vector laid out in 2 dimensions
# matrix(data, number of rows, number of columns)
# row = horizontal (left-right) / column = vertical (up-down)
x <- matrix(1:20,5,4)
x
x <- matrix(1:20,4,5)
x
class(x) # on R 4.0 and earlier only "matrix" was printed
|
/rclass1.r
|
no_license
|
sp-moribito/R
|
R
| false
| false
| 1,068
|
r
|
# Introductory R tutorial script: vectors, type coercion, dates, logicals,
# indexing, factors and matrices. Each bare expression is meant to be run
# interactively so its printed value can be inspected.
x<-c(1,2,3,4,5)
x
# Character vector; the last three elements are Korean syllables.
x <- c("A","B","C","가","나","다")
x
class(x)
x<-c("1","2","3")
x
# A date written as a plain string is just character data...
x <- "2018-01-18"
class(x)
# ...while as.Date() produces a real Date object.
x <- as.Date("2020-01-18")
x
class(x)
y <- as.Date("2019-01-18")
y
class(y)
x-y
x <- TRUE
y <- FALSE
class(x)
class(y)
# Alt+Enter = run the current line (RStudio shortcut)
# Alt+ - = inserts the assignment operator <-
# 1-dimensional - vector / list
# 2-dimensional - matrix / data frame
# 3-dimensional -
# data wrangling - preprocessing
# Mixing numbers and characters coerces every element to character.
x <- c(1,2,"a",4)
x
x[2] # R indexing starts at 1.
x[3]
x[c(2,3)]
x[2:3]
x[-1] # negative index: drop the element at that position, keep the rest
x[x=='a']
x <- factor(c("M","F","F","M")) # assign factor-structured data to a variable
x
class(x)
levels(x) # inspect the categories (levels)
levels(x) <- c("A","B") # replace the F,M labels with A,B
x
x[1]
x[1:3]
# matrix: a 1-dimensional vector laid out in 2 dimensions
# matrix(data, number of rows, number of columns)
# row = horizontal (left-right) / column = vertical (up-down)
x <- matrix(1:20,5,4)
x
x <- matrix(1:20,4,5)
x
class(x) # on R 4.0 and earlier only "matrix" was printed
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/context.R
\name{context}
\alias{context}
\alias{n}
\alias{cur_data}
\alias{cur_group}
\alias{cur_group_id}
\alias{cur_group_rows}
\alias{cur_column}
\title{Context dependent expressions}
\usage{
n()
cur_data()
cur_group()
cur_group_id()
cur_group_rows()
cur_column()
}
\description{
These functions return information about the "current" group or "current"
variable, so only work inside specific contexts like \code{summarise()} and
\code{mutate()}
\itemize{
\item \code{n()} gives the current group size.
\item \code{cur_data()} gives the current data for the current group (excluding
grouping variables).
\item \code{cur_group()} gives the group keys, a tibble with one row and one column
for each grouping variable.
\item \code{cur_group_id()} gives a unique numeric identifier for the current group.
\item \code{cur_column()} gives the name of the current column (in \code{\link[=across]{across()}} only).
}
See \code{\link[=group_data]{group_data()}} for equivalent functions that return values for all
groups.
}
\section{data.table}{
If you're familiar with data.table:
\itemize{
\item \code{cur_data()} <-> \code{.SD}
\item \code{cur_group_id()} <-> \code{.GRP}
\item \code{cur_group()} <-> \code{.BY}
\item \code{cur_group_rows()} <-> \code{.I}
}
}
\examples{
df <- tibble(
g = sample(rep(letters[1:3], 1:3)),
x = runif(6),
y = runif(6)
)
gf <- df \%>\% group_by(g)
gf \%>\% summarise(n = n())
gf \%>\% mutate(id = cur_group_id())
gf \%>\% summarise(row = cur_group_rows())
gf \%>\% summarise(data = list(cur_group()))
gf \%>\% summarise(data = list(cur_data()))
gf \%>\% mutate(across(everything(), ~ paste(cur_column(), round(.x, 2))))
}
|
/man/context.Rd
|
permissive
|
earowang/dplyr
|
R
| false
| true
| 1,742
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/context.R
\name{context}
\alias{context}
\alias{n}
\alias{cur_data}
\alias{cur_group}
\alias{cur_group_id}
\alias{cur_group_rows}
\alias{cur_column}
\title{Context dependent expressions}
\usage{
n()
cur_data()
cur_group()
cur_group_id()
cur_group_rows()
cur_column()
}
\description{
These functions return information about the "current" group or "current"
variable, so only work inside specific contexts like \code{summarise()} and
\code{mutate()}
\itemize{
\item \code{n()} gives the current group size.
\item \code{cur_data()} gives the current data for the current group (excluding
grouping variables).
\item \code{cur_group()} gives the group keys, a tibble with one row and one column
for each grouping variable.
\item \code{cur_group_id()} gives a unique numeric identifier for the current group.
\item \code{cur_column()} gives the name of the current column (in \code{\link[=across]{across()}} only).
}
See \code{\link[=group_data]{group_data()}} for equivalent functions that return values for all
groups.
}
\section{data.table}{
If you're familiar with data.table:
\itemize{
\item \code{cur_data()} <-> \code{.SD}
\item \code{cur_group_id()} <-> \code{.GRP}
\item \code{cur_group()} <-> \code{.BY}
\item \code{cur_group_rows()} <-> \code{.I}
}
}
\examples{
df <- tibble(
g = sample(rep(letters[1:3], 1:3)),
x = runif(6),
y = runif(6)
)
gf <- df \%>\% group_by(g)
gf \%>\% summarise(n = n())
gf \%>\% mutate(id = cur_group_id())
gf \%>\% summarise(row = cur_group_rows())
gf \%>\% summarise(data = list(cur_group()))
gf \%>\% summarise(data = list(cur_data()))
gf \%>\% mutate(across(everything(), ~ paste(cur_column(), round(.x, 2))))
}
|
# Compute the area under the ROC curve (AUC) for PhenoPath interaction
# detection on simulated single-cell datasets, writing one row per run to CSV.
# NOTE(review): parse_split and parse_sceset_path come from parse_funcs.R
# (sourced below); their exact contracts are not visible in this file.
library(scater)
library(readr)
library(AUC)
library(aargh)
library(dplyr)
library(stringr)
source("analysis/simulations/parse_funcs.R")
sim_dir <- file.path("data", "simulations")
# Compute the AUC for a single simulation run.
# df_small: one-row data frame with (at least) columns `path` (PhenoPath
# feature-data CSV with m_beta / s_beta columns) and `sceset_path` (RDS file
# holding the simulated expression set).
calculate_rocs_pp <- function(df_small) {
df <- read_csv(df_small$path)
# "Quasi" p-value: normal density at zero given the posterior mean and
# variance of the interaction coefficient beta.
# NOTE(review): this is a density evaluated at 0, not a tail probability --
# confirm that this is the intended evidence score.
quasi_p_val <- dnorm(0, df$m_beta, sqrt(df$s_beta))
sce <- readRDS(df_small$sceset_path)
# Ground truth: 1 if the gene was simulated with an interaction, else 0.
is_interaction <- 1 * (fData(sce)$is_interaction)
# Higher (1 - quasi_p_val) should rank true interactions first.
roc_obj <- AUC::roc(1 - quasi_p_val, factor(is_interaction))
return(AUC::auc(roc_obj))
}
# Run calculate_rocs_pp over every file in data/simulations/phenopath_fdata
# and write a table of run metadata plus AUC to `output_file`.
calculate_auc_pp <- function(output_file = "output.csv") {
sim_dir <- file.path("data", "simulations")
all_pp <- dir(file.path(sim_dir, "phenopath_fdata"))
# File names encode run metadata separated by underscores.
split <- str_split(all_pp, "_")
df_split <- bind_rows(Map(parse_split, split, file.path(sim_dir, "phenopath_fdata", all_pp)))
sceset_paths <- sapply(seq_len(nrow(df_split)), function(i) parse_sceset_path(df_split[i,]))
df_split$sceset_path <- sceset_paths
rocs <- sapply(seq_len(nrow(df_split)), function(i) {
print(df_split[i,1:5])
calculate_rocs_pp(df_split[i,])
})
df_split$auc <- rocs
write_csv(df_split, output_file)
}
# aargh exposes calculate_auc_pp as a command-line entry point.
aargh(calculate_auc_pp)
|
/analysis/simulations/calculate_auc_phenopath.R
|
no_license
|
kieranrcampbell/phenopath_revisions
|
R
| false
| false
| 1,163
|
r
|
# Compute the area under the ROC curve (AUC) for PhenoPath interaction
# detection on simulated single-cell datasets, writing one row per run to CSV.
# NOTE(review): parse_split and parse_sceset_path come from parse_funcs.R
# (sourced below); their exact contracts are not visible in this file.
library(scater)
library(readr)
library(AUC)
library(aargh)
library(dplyr)
library(stringr)
source("analysis/simulations/parse_funcs.R")
sim_dir <- file.path("data", "simulations")
# Compute the AUC for a single simulation run.
# df_small: one-row data frame with (at least) columns `path` (PhenoPath
# feature-data CSV with m_beta / s_beta columns) and `sceset_path` (RDS file
# holding the simulated expression set).
calculate_rocs_pp <- function(df_small) {
df <- read_csv(df_small$path)
# "Quasi" p-value: normal density at zero given the posterior mean and
# variance of the interaction coefficient beta.
# NOTE(review): this is a density evaluated at 0, not a tail probability --
# confirm that this is the intended evidence score.
quasi_p_val <- dnorm(0, df$m_beta, sqrt(df$s_beta))
sce <- readRDS(df_small$sceset_path)
# Ground truth: 1 if the gene was simulated with an interaction, else 0.
is_interaction <- 1 * (fData(sce)$is_interaction)
# Higher (1 - quasi_p_val) should rank true interactions first.
roc_obj <- AUC::roc(1 - quasi_p_val, factor(is_interaction))
return(AUC::auc(roc_obj))
}
# Run calculate_rocs_pp over every file in data/simulations/phenopath_fdata
# and write a table of run metadata plus AUC to `output_file`.
calculate_auc_pp <- function(output_file = "output.csv") {
sim_dir <- file.path("data", "simulations")
all_pp <- dir(file.path(sim_dir, "phenopath_fdata"))
# File names encode run metadata separated by underscores.
split <- str_split(all_pp, "_")
df_split <- bind_rows(Map(parse_split, split, file.path(sim_dir, "phenopath_fdata", all_pp)))
sceset_paths <- sapply(seq_len(nrow(df_split)), function(i) parse_sceset_path(df_split[i,]))
df_split$sceset_path <- sceset_paths
rocs <- sapply(seq_len(nrow(df_split)), function(i) {
print(df_split[i,1:5])
calculate_rocs_pp(df_split[i,])
})
df_split$auc <- rocs
write_csv(df_split, output_file)
}
# aargh exposes calculate_auc_pp as a command-line entry point.
aargh(calculate_auc_pp)
|
#Given a 4x2 matrix containing four points in the x-y plane, determine if the four points form a convex
#quadrilateral. Return TRUE if they do form a convex quadrilateral and FALSE otherwise. The function
#prototype is: Is.convex.quad <- function(p).
#p is a 4x2 numerical matrix. Each row of p is a point in the x-y plane.
Is.convex.quad <- function(p)
{#Begin Function
  #Error checks: validate type and dimensions before using the data.
  if (!is.numeric(p)) { stop("Parameter must be numeric") }
  if (!is.matrix(p)) { stop("Parameter must be a matrix") }
  if (nrow(p) != 4) { stop("Parameter must have 4 rows") }
  if (ncol(p) != 2) { stop("Parameter must have 2 columns") }
  #Extract the four vertices (one per row), in the order given.
  A <- p[1, ]
  B <- p[2, ]
  C <- p[3, ]
  D <- p[4, ]
  #Side lengths via the distance formula.
  AB <- sqrt((B[1] - A[1])^2 + (B[2] - A[2])^2)
  BC <- sqrt((C[1] - B[1])^2 + (C[2] - B[2])^2)
  CD <- sqrt((D[1] - C[1])^2 + (D[2] - C[2])^2)
  DA <- sqrt((A[1] - D[1])^2 + (A[2] - D[2])^2)
  #Diagonal lengths via the distance formula.
  AC <- sqrt((C[1] - A[1])^2 + (C[2] - A[2])^2)
  BD <- sqrt((D[1] - B[1])^2 + (D[2] - B[2])^2)
  #Half the summed diagonal lengths, used as `x` in the identity below.
  #NOTE(review): Euler's quadrilateral theorem (generalized parallelogram law)
  #uses the distance between the midpoints of the two diagonals, not
  #(AC + BD)/2 -- confirm the intended formula. Kept as-is to preserve the
  #original classification behavior.
  x <- (AC + BD) / 2
  #Euler-style identity: sum of squared sides vs squared diagonals + 4*x^2.
  #BUG FIX: the original compared floating-point values with exact `==`, so
  #e.g. a unit square (sides involving sqrt(2)) missed equality by ~1e-15 and
  #was reported as non-convex. Compare with a numeric tolerance instead.
  lhs <- AB^2 + BC^2 + CD^2 + DA^2
  rhs <- AC^2 + BD^2 + 4 * (x^2)
  if (isTRUE(all.equal(lhs, rhs)))
  {#if
    return(TRUE)
  }#if
  else
  {#else
    return(FALSE)
  }#else
}#End function
#Test for true
p = matrix(c(0,1,0,1,0,0,1,1),ncol=2,nrow=4)
#Test for false
p = matrix(c(0.00,1.00,0.00,0.25,0.00,0.00,1.00,0.25),ncol=2,nrow=4)
|
/RScripts/convex.quad.R
|
no_license
|
fagnersutel/r-programming
|
R
| false
| false
| 1,790
|
r
|
#Given a 4x2 matrix containing four points in the x-y plane, determine if the four points form a convex
#quadrilateral. Return TRUE if they do form a convex quadrilateral and FALSE otherwise. The function
#prototype is: Is.convex.quad <- function(p).
#p is a 4x2 numerical matrix. Each row of p is a point in the x-y plane.
Is.convex.quad <- function(p)
{#Begin Function
  #Error checks: validate type and dimensions before using the data.
  if (!is.numeric(p)) { stop("Parameter must be numeric") }
  if (!is.matrix(p)) { stop("Parameter must be a matrix") }
  if (nrow(p) != 4) { stop("Parameter must have 4 rows") }
  if (ncol(p) != 2) { stop("Parameter must have 2 columns") }
  #Extract the four vertices (one per row), in the order given.
  A <- p[1, ]
  B <- p[2, ]
  C <- p[3, ]
  D <- p[4, ]
  #Side lengths via the distance formula.
  AB <- sqrt((B[1] - A[1])^2 + (B[2] - A[2])^2)
  BC <- sqrt((C[1] - B[1])^2 + (C[2] - B[2])^2)
  CD <- sqrt((D[1] - C[1])^2 + (D[2] - C[2])^2)
  DA <- sqrt((A[1] - D[1])^2 + (A[2] - D[2])^2)
  #Diagonal lengths via the distance formula.
  AC <- sqrt((C[1] - A[1])^2 + (C[2] - A[2])^2)
  BD <- sqrt((D[1] - B[1])^2 + (D[2] - B[2])^2)
  #Half the summed diagonal lengths, used as `x` in the identity below.
  #NOTE(review): Euler's quadrilateral theorem (generalized parallelogram law)
  #uses the distance between the midpoints of the two diagonals, not
  #(AC + BD)/2 -- confirm the intended formula. Kept as-is to preserve the
  #original classification behavior.
  x <- (AC + BD) / 2
  #Euler-style identity: sum of squared sides vs squared diagonals + 4*x^2.
  #BUG FIX: the original compared floating-point values with exact `==`, so
  #e.g. a unit square (sides involving sqrt(2)) missed equality by ~1e-15 and
  #was reported as non-convex. Compare with a numeric tolerance instead.
  lhs <- AB^2 + BC^2 + CD^2 + DA^2
  rhs <- AC^2 + BD^2 + 4 * (x^2)
  if (isTRUE(all.equal(lhs, rhs)))
  {#if
    return(TRUE)
  }#if
  else
  {#else
    return(FALSE)
  }#else
}#End function
#Test for true
p = matrix(c(0,1,0,1,0,0,1,1),ncol=2,nrow=4)
#Test for false
p = matrix(c(0.00,1.00,0.00,0.25,0.00,0.00,1.00,0.25),ncol=2,nrow=4)
|
# Monte Carlo experiment: simulate `valor` pairs of sample means drawn from
# two normal populations -- N(mu, sigma^2) with n observations and
# N(mu, sigma2^2) with n2 observations -- then count how often the difference
# of the paired means reaches the threshold 4.22.
mu <- 0
valor <- 1000      # number of simulated replicates
sigma <- 5.2       # s.d. of population 1
sigma2 <- 3.4      # s.d. of population 2
n <- 25            # sample size drawn from population 1
n2 <- 36           # sample size drawn from population 2
xbarra1 <- 81      # carried over from the original exercise (unused below)
xbarra2 <- 76      # carried over from the original exercise (unused below)
# Draw the sample means; replicate() evaluates mean(rnorm(...)) `valor`
# times, consuming random numbers in the same order an explicit loop would.
vector <- replicate(valor, mean(rnorm(n, mu, sigma)))
vector2 <- replicate(valor, mean(rnorm(n2, mu, sigma2)))
# Pairwise differences of the simulated sample means.
vectorfin <- vector - vector2
# Count the replicates whose difference reaches 4.22.
contador <- as.numeric(sum(vectorfin >= 4.22))
contador
|
/10,30.R
|
no_license
|
FelipeRojas15/PRYE
|
R
| false
| false
| 389
|
r
|
# Monte Carlo experiment: simulate `valor` pairs of sample means drawn from
# two normal populations and count how often their difference reaches 4.22.
mu<- 0
valor<-1000
sigma <- 5.2
sigma2<- 3.4
n <- 25
n2<- 36
# xbarra1/xbarra2 appear to come from the original exercise statement;
# they are never used below.
xbarra1 <- 81
xbarra2 <- 76
vector <-1:valor
vector2 <- 1:valor
# Sample means of n draws from N(mu, sigma^2), one per replicate.
for(i in vector){
vector[i]<- mean(rnorm(n,mu,sigma))
}
# Sample means of n2 draws from N(mu, sigma2^2), one per replicate.
for(y in vector2){
vector2[y]<- mean(rnorm(n2,mu,sigma2))
}
vectorfin<- 1:valor
contador<-0
# Pairwise differences of the simulated sample means.
vectorfin<-vector-vector2
# Count the replicates whose difference reaches the 4.22 threshold.
for(c in 1:valor){
if(vectorfin[c]>=4.22){
contador=contador+1
}
}
contador
|
## ---- include = FALSE---------------------------------------------------------
# Purled R code from the MPBoost vignette: demonstrates maximal-procedure
# treatment allocation and plots cumulative allocation paths.
knitr::opts_chunk$set(collapse = TRUE, fig.width = 4.8, fig.height = 4.8)
## -----------------------------------------------------------------------------
# Balanced 1:1 allocation of 6 + 6 subjects with the default MTI.
library(MPBoost)
mpboost(N1 = 6, N2 = 6)
## -----------------------------------------------------------------------------
# Same allocation with an explicit maximum tolerated imbalance (MTI) of 3.
mpboost(N1 = 6, N2 = 6, MTI = 3)
## -----------------------------------------------------------------------------
# Unbalanced 1:2 allocation.
mpboost(N1 = 6, N2 = 12)
## -----------------------------------------------------------------------------
# A larger 1:2 randomization; x1 holds the treatment labels (1 or 2).
set.seed(1) ## Only needed for reproducibility
x1 <- mpboost(N1 = 20, N2 = 40, MTI = 4)
x1
## -----------------------------------------------------------------------------
# Plot the cumulative allocation path (n1 vs n2): dotted lines mark the MTI
# boundaries around the target allocation ratio (dashed line).
lx <- sum(x1 == 1)   # total subjects allocated to treatment 1
ly <- sum(x1 == 2)   # total subjects allocated to treatment 2
ratio <- lx/ly       # target allocation ratio
mti <- 4
plot(cumsum(x1 == 1), cumsum(x1 == 2), xlim = c(0, lx), ylim = c(0, ly),
xlab = expression(n[1]), ylab = expression(n[2]), lab = c(lx, ly, 7),
type = "b", pch = 16, panel.first = grid(), col = "red", bty = "n",
xaxs = "i", yaxs = "i", xpd = TRUE, cex.axis = 0.8)
abline(-mti/ratio, 1/ratio, lty = 3)
abline(mti/ratio, 1/ratio, lty = 3)
abline(0, 1/ratio, lty = 2)
## -----------------------------------------------------------------------------
# Overlay a second realization (semi-transparent blue) for comparison.
set.seed(11) ## Only needed for reproducibility
x2 <- mpboost(N1 = 20, N2 = 40, MTI = 4)
plot(cumsum(x1 == 1), cumsum(x1 == 2), xlim = c(0, lx), ylim = c(0, ly),
xlab = expression(n[1]), ylab = expression(n[2]), lab = c(lx, ly, 7),
type = "b", pch = 16, panel.first = grid(), col = "red", bty = "n",
xaxs = "i", yaxs = "i", xpd = TRUE, cex.axis = 0.8)
abline(-mti/ratio, 1/ratio, lty = 3)
abline(mti/ratio, 1/ratio, lty = 3)
abline(0, 1/ratio, lty = 2)
lines(cumsum(x2 == 1), cumsum(x2 == 2), type = "b", pch = 16,
col = rgb(0, 0, 1, alpha = 0.5), xpd = TRUE)
|
/inst/doc/mpboost.R
|
no_license
|
cran/MPBoost
|
R
| false
| false
| 1,847
|
r
|
## ---- include = FALSE---------------------------------------------------------
# Purled R code from the MPBoost vignette: demonstrates maximal-procedure
# treatment allocation and plots cumulative allocation paths.
knitr::opts_chunk$set(collapse = TRUE, fig.width = 4.8, fig.height = 4.8)
## -----------------------------------------------------------------------------
# Balanced 1:1 allocation of 6 + 6 subjects with the default MTI.
library(MPBoost)
mpboost(N1 = 6, N2 = 6)
## -----------------------------------------------------------------------------
# Same allocation with an explicit maximum tolerated imbalance (MTI) of 3.
mpboost(N1 = 6, N2 = 6, MTI = 3)
## -----------------------------------------------------------------------------
# Unbalanced 1:2 allocation.
mpboost(N1 = 6, N2 = 12)
## -----------------------------------------------------------------------------
# A larger 1:2 randomization; x1 holds the treatment labels (1 or 2).
set.seed(1) ## Only needed for reproducibility
x1 <- mpboost(N1 = 20, N2 = 40, MTI = 4)
x1
## -----------------------------------------------------------------------------
# Plot the cumulative allocation path (n1 vs n2): dotted lines mark the MTI
# boundaries around the target allocation ratio (dashed line).
lx <- sum(x1 == 1)   # total subjects allocated to treatment 1
ly <- sum(x1 == 2)   # total subjects allocated to treatment 2
ratio <- lx/ly       # target allocation ratio
mti <- 4
plot(cumsum(x1 == 1), cumsum(x1 == 2), xlim = c(0, lx), ylim = c(0, ly),
xlab = expression(n[1]), ylab = expression(n[2]), lab = c(lx, ly, 7),
type = "b", pch = 16, panel.first = grid(), col = "red", bty = "n",
xaxs = "i", yaxs = "i", xpd = TRUE, cex.axis = 0.8)
abline(-mti/ratio, 1/ratio, lty = 3)
abline(mti/ratio, 1/ratio, lty = 3)
abline(0, 1/ratio, lty = 2)
## -----------------------------------------------------------------------------
# Overlay a second realization (semi-transparent blue) for comparison.
set.seed(11) ## Only needed for reproducibility
x2 <- mpboost(N1 = 20, N2 = 40, MTI = 4)
plot(cumsum(x1 == 1), cumsum(x1 == 2), xlim = c(0, lx), ylim = c(0, ly),
xlab = expression(n[1]), ylab = expression(n[2]), lab = c(lx, ly, 7),
type = "b", pch = 16, panel.first = grid(), col = "red", bty = "n",
xaxs = "i", yaxs = "i", xpd = TRUE, cex.axis = 0.8)
abline(-mti/ratio, 1/ratio, lty = 3)
abline(mti/ratio, 1/ratio, lty = 3)
abline(0, 1/ratio, lty = 2)
lines(cumsum(x2 == 1), cumsum(x2 == 2), type = "b", pch = 16,
col = rgb(0, 0, 1, alpha = 0.5), xpd = TRUE)
|
# trend pnl prediction
# Analyze intraday (minute-bar) data for sh000001: split each trading day
# into morning/afternoon returns and explore weekday and serial correlation.
# NOTE(review): getMinuteDataFolder, getOneTicker, fillData000001,
# generateIndexMin and getMonOfWeek are project-local helpers not visible
# here -- their behavior is assumed from usage.
symb <- "sh000001"
# Read minute bars: Date, Time, Open, High, Low, Close; keyed by date D.
d<- fread(paste0(getMinuteDataFolder(),getOneTicker(symb),".txt"),
skip = 1,fill = T,select = c(1,2,3,4,5,6),key = "D",col.names = c("D","T","O","H","L","C"))
# Drop the last row (presumably a partial/footer row -- confirm).
d <- d[!.N,]
#for shcomp only
fillData000001()
d <- generateIndexMin()
d[, D:=ymd(D)]
#d[, DT:=ymd_hm(paste(D,paste0(str_sub(T,1,str_length(T)-2),":",str_sub(T,str_length(T)-1))))]
#(d[,list(D,T,O,H,L,C)])
# Last close before 11:31 (morning session end), per day.
d[, .SD[T<1131][.N][,C] , keyby=list(D)]
# Per day: first open, last pre-11:31 close ("noon"), and daily close.
d1 <- d[, .(open=.SD[1][,O],noon=.SD[T<1131][.N][,C],close=.SD[.N][,C]) , keyby=list(D)]
# Preview: am = overnight+morning return, pm = afternoon return,
# cc = close-to-close return.
d1[, .(am=noon/shift(close,1)-1, pm=close/noon-1, cc=close/shift(close,1)-1), keyby=list(D)]
d1[, am:= noon /shift(close,1)-1]
d1[is.na(am), am:= 0]   # first day has no prior close
d1[, pm:= close/noon-1]
d1[, cc:= close/shift(close,1)-1]
d1[is.na(cc),cc:=0 ]
# Weekday index (lubridate wday() - 1, so Monday..Friday map to 1..5).
d1[, w:= wday(D)-1]
d1[, mon:= getMonOfWeek(D)]   # week identifier (Monday of the week)
d1[, cumsumTrend:= cumsum(am) , keyby=list(mon)]   # cumulative AM trend within week
d1[, obsEachWeek:= .N , keyby=list(mon)]
# Keep only full 5-day trading weeks.
d2 <- d1[obsEachWeek==5, ]
# Wednesday cumulative trend vs Thursday/Friday morning returns.
d2<-d2[, .(cumtrendWed=.SD[3][,cumsumTrend], thursAM=.SD[4][,am], friAM=.SD[5][,am]), keyby=list(mon)]
d2[, qplot(cumtrendWed, thursAM)]
d2[, qplot(cumtrendWed, friAM)]
# Wide matrices: one row per week, one column per weekday.
amMat<-dcast.data.table(d1, mon~w, value.var = "am")
pmMat<-dcast.data.table(d1, mon~w, value.var = "pm")
ccMat<-dcast.data.table(d1, mon~w, value.var = "cc")
amMat[,complete:=complete.cases(.SD), keyby=list(mon)]
pmMat[,complete:=complete.cases(.SD), keyby=list(mon)]
ccMat[,complete:=complete.cases(.SD), keyby=list(mon)]
# Cross-weekday correlations for complete weeks.
cor(amMat[complete==TRUE, .(`1`,`2`,`3`,`4`,`5`) ])
cor(pmMat[complete==TRUE, .(`1`,`2`,`3`,`4`,`5`) ])
cor(ccMat[complete==TRUE, .(`1`,`2`,`3`,`4`,`5`) ])
# Partial autocorrelations of the daily return series.
d1[, pacf(am)]
d1[, pacf(pm)]
d1[, pacf(cc)]
|
/inst/trendPnlAnalysis.R
|
no_license
|
lukas1421/chinaTrading
|
R
| false
| false
| 1,645
|
r
|
# trend pnl prediction
# Analyze intraday (minute-bar) data for sh000001: split each trading day
# into morning/afternoon returns and explore weekday and serial correlation.
# NOTE(review): getMinuteDataFolder, getOneTicker, fillData000001,
# generateIndexMin and getMonOfWeek are project-local helpers not visible
# here -- their behavior is assumed from usage.
symb <- "sh000001"
# Read minute bars: Date, Time, Open, High, Low, Close; keyed by date D.
d<- fread(paste0(getMinuteDataFolder(),getOneTicker(symb),".txt"),
skip = 1,fill = T,select = c(1,2,3,4,5,6),key = "D",col.names = c("D","T","O","H","L","C"))
# Drop the last row (presumably a partial/footer row -- confirm).
d <- d[!.N,]
#for shcomp only
fillData000001()
d <- generateIndexMin()
d[, D:=ymd(D)]
#d[, DT:=ymd_hm(paste(D,paste0(str_sub(T,1,str_length(T)-2),":",str_sub(T,str_length(T)-1))))]
#(d[,list(D,T,O,H,L,C)])
# Last close before 11:31 (morning session end), per day.
d[, .SD[T<1131][.N][,C] , keyby=list(D)]
# Per day: first open, last pre-11:31 close ("noon"), and daily close.
d1 <- d[, .(open=.SD[1][,O],noon=.SD[T<1131][.N][,C],close=.SD[.N][,C]) , keyby=list(D)]
# Preview: am = overnight+morning return, pm = afternoon return,
# cc = close-to-close return.
d1[, .(am=noon/shift(close,1)-1, pm=close/noon-1, cc=close/shift(close,1)-1), keyby=list(D)]
d1[, am:= noon /shift(close,1)-1]
d1[is.na(am), am:= 0]   # first day has no prior close
d1[, pm:= close/noon-1]
d1[, cc:= close/shift(close,1)-1]
d1[is.na(cc),cc:=0 ]
# Weekday index (lubridate wday() - 1, so Monday..Friday map to 1..5).
d1[, w:= wday(D)-1]
d1[, mon:= getMonOfWeek(D)]   # week identifier (Monday of the week)
d1[, cumsumTrend:= cumsum(am) , keyby=list(mon)]   # cumulative AM trend within week
d1[, obsEachWeek:= .N , keyby=list(mon)]
# Keep only full 5-day trading weeks.
d2 <- d1[obsEachWeek==5, ]
# Wednesday cumulative trend vs Thursday/Friday morning returns.
d2<-d2[, .(cumtrendWed=.SD[3][,cumsumTrend], thursAM=.SD[4][,am], friAM=.SD[5][,am]), keyby=list(mon)]
d2[, qplot(cumtrendWed, thursAM)]
d2[, qplot(cumtrendWed, friAM)]
# Wide matrices: one row per week, one column per weekday.
amMat<-dcast.data.table(d1, mon~w, value.var = "am")
pmMat<-dcast.data.table(d1, mon~w, value.var = "pm")
ccMat<-dcast.data.table(d1, mon~w, value.var = "cc")
amMat[,complete:=complete.cases(.SD), keyby=list(mon)]
pmMat[,complete:=complete.cases(.SD), keyby=list(mon)]
ccMat[,complete:=complete.cases(.SD), keyby=list(mon)]
# Cross-weekday correlations for complete weeks.
cor(amMat[complete==TRUE, .(`1`,`2`,`3`,`4`,`5`) ])
cor(pmMat[complete==TRUE, .(`1`,`2`,`3`,`4`,`5`) ])
cor(ccMat[complete==TRUE, .(`1`,`2`,`3`,`4`,`5`) ])
# Partial autocorrelations of the daily return series.
d1[, pacf(am)]
d1[, pacf(pm)]
d1[, pacf(cc)]
|
######################################################
# Purpose: Plot age trends in C cycle variables
# Inputs:
# - ForC_simplified table
# - Map of the world (in supplementary resources)
# Outputs:
# -
# Developed by: Valentine Herrmann - HerrmannV@si.edu in April 2018
# R version 3.4.4 (2018-03-15)
######################################################
# Clean environment ####
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged
# practice; kept as-is since this is a standalone analysis script.
rm(list = ls())
# Set working directory as ForC main folder ####
setwd(".")
# Load libraries ####
library(lme4)
library(multcomp)
library(lsmeans)
library(moments)
library(sp)
library(rgdal)
library(raster)
# Load data ####
ForC_simplified <- read.csv("ForC_simplified/ForC_simplified.csv", stringsAsFactors = F)
Continents <- readOGR("supplementary_resources/World Map data/Continents/World_Continents.shp")
# Codes that represent missing values in ForC fields.
na_codes <- c("NA", "NI", "NRA", "NaN", "NAC")
# is.na() that also treats the ForC missing-value codes as missing.
my_is.na <- function(x) { is.na(x) | x %in% na_codes}
# Drop both real NAs and ForC missing-value codes from a vector.
my_na.omit <- function(x) { return(x[!my_is.na(x)])}
# set parameters ####
save.plot = F
# Prepare data ####
## Filter out managed, disturbed and no history info sites
ForC_simplified <- ForC_simplified[ForC_simplified$managed %in% 0 & ForC_simplified$disturbed %in% 0 & ForC_simplified$history.no.info %in%0, ]
## Make stand.age a numeric variable
ForC_simplified$stand.age <- as.numeric(ForC_simplified$stand.age)
## make geographic area and plot.name factors
ForC_simplified$geographic.area <- addNA(ForC_simplified$geographic.area)
ForC_simplified$plot.name <- addNA(ForC_simplified$plot.name)
## Prepare Forest Biomes
### Koeppen region: A* = Tropical; C / Da / Db = Temperate; Dc / Dd = Boreal.
KOEPPEN <- ifelse(grepl("^A", ForC_simplified$Koeppen), "Tropical",
ifelse(grepl("(^C)|(^D.a$)|(^D.b$)", ForC_simplified$Koeppen), "Temperate",
ifelse(grepl("(^D.c$)|(^D.d$)", ForC_simplified$Koeppen), "Boreal", "Other")))
### Broadleaf vs Conifer (ForC dominant vegetation codes)
broadleaf_codes <- c("2TEB", "2TDB", "2TB")
conifer_codes <- c("2TEN", "2TDN", "2TN")
Leaf_Trait <- ifelse(ForC_simplified$dominant.veg %in% broadleaf_codes, "broadleaf",
ifelse(ForC_simplified$dominant.veg %in% conifer_codes, "conifer", "Other"))
### combine to get tropical, temperate_broadleaf, temperate_evergreen and boreal
Biome <- paste(KOEPPEN, Leaf_Trait)
table(Biome)
ForC_simplified$Biome <- factor(ifelse(grepl("Boreal", Biome), "Boreal",
ifelse(grepl("Tropical", Biome), "Tropical",
ifelse(Biome %in% "Temperate broadleaf", Biome,
ifelse(Biome %in% "Temperate conifer", Biome, "Other")))))
table(ForC_simplified$Biome)
# Remove Biome Other
ForC_simplified <- droplevels(ForC_simplified[!ForC_simplified$Biome %in% "Other", ])
# order Biomes correctly (tropical -> boreal) for plotting
ForC_simplified$Biome <- factor(ForC_simplified$Biome, levels = c("Tropical", "Temperate broadleaf", "Temperate conifer", "Boreal"))
# Remove rows with no Age
ForC_simplified <- droplevels(ForC_simplified[!is.na(ForC_simplified$stand.age), ])
## prepare color for biomes (order matches Biome factor levels above)
levels(ForC_simplified$Biome)
color.biome <- c( "red", "green", "blue", "cyan2")
## prepare map: crop to latitudes with forest data
Continents <- crop(Continents, extent(-180, 180, -43, 73))
## prepare variables that need to be grouped (collapse numbered variants)
ForC_simplified[ForC_simplified$variable.name %in% c("NPP_1", "NPP_2", "NPP_3", "NPP_4", "NPP_5"),]$variable.name <- "NPP"
ForC_simplified[ForC_simplified$variable.name %in% c("ANPP_0", "ANPP_1", "ANPP_2"),]$variable.name <- "ANPP"
ForC_simplified[ForC_simplified$variable.name %in% c("ANPP_litterfall_1", "ANPP_litterfall_2", "ANPP_litterfall_0"),]$variable.name <- "ANPP_litterfall"
#### multiply NEP by -1 and consider it as NEE (sign convention flip)
ForC_simplified[ForC_simplified $variable.name %in% c("NEP"),]$mean <- -ForC_simplified[ForC_simplified $variable.name %in% c("NEP"),]$mean
ForC_simplified[ForC_simplified $variable.name %in% c("NEP"),]$variable.name <- "NEE"
## Prepare list of variables to use (those that have at least 30 records in young forest)
response.variables <- names(which(table(ForC_simplified[ForC_simplified$stand.age < 100 & ForC_simplified$stand.age !=0, ]$variable.name)>= 30))
response.variables <- response.variables[!response.variables %in% c("NPP_understory", "total.ecosystem", "soil")]
# Run analysis + plot####
# For each response variable: fit a mixed model of mean ~ log10(age) (+Biome)
# on young stands (<100 yr), test age and Biome effects by BIC (drop1 with
# k = log(n)), compare mature-stand (>=100 yr) means across biomes, and draw
# a map + scatter + boxplot panel.
for(response.v in response.variables) {
print(response.v)
# right.skewed.response <- response.v %in% right.skewed_response.variables
### data
df <- ForC_simplified[ForC_simplified$variable.name %in% response.v, ]
df.mature <- df[df$stand.age >= 100, ]
df.young <- df[df$stand.age < 100 & df$stand.age != 0,] # removing 0 because we are taking the log. Removes 28 records
# Treat as right-skewed (log y-axis) when skewness > 2 and strictly positive.
right.skewed.response <- skewness(df.young$mean) > 2 & all(df.young$mean > 0)
### ylim
ylim = range(df$mean)
### model young: random intercepts for plot nested in geographic area
mod.young <- lmer(mean ~ log10(stand.age) + Biome + (1|geographic.area/plot.name), data = droplevels(df.young))
# BIC-style drop1 (k = log n): age is "significant" when dropping it
# increases the criterion.
drop1.result <- drop1(mod.young, k = log(nrow(df.young)))
age.significant <- drop1.result$AIC[2] > drop1.result$AIC[1]
at.least.10.different.ages.in.each.Biome <- all(tapply(droplevels(df.young)$stand.age, droplevels(df.young)$Biome, function(x) length(x)>=10))
# Allow an age x Biome interaction only when each biome has enough data.
if(age.significant & at.least.10.different.ages.in.each.Biome) mod.young <- lmer(mean ~ log10(stand.age) * Biome + (1|geographic.area/plot.name), data = droplevels(df.young))
# mod.without.age <- lmer(mean ~ Biome + (1|geographic.area/plot.name), data = droplevels(df.young))
# age.significant <- anova(mod.without.age, mod)$"Pr(>Chisq)"[2] < 0.05
# Prediction grid over the observed age range, fixed effects only (re.form = NA).
newDat <- expand.grid(stand.age = 10^seq(min(log10(df.young$stand.age))+0.01, max(log10(df.young$stand.age)), length.out = 100), Biome = levels(droplevels(df.young$Biome)))
fit <- predict(mod.young, newDat, re.form = NA)
### model mature: Biome effect on mature stands; may fail with sparse data
mod.mature <- try(lmer(mean ~ Biome + (1|geographic.area/plot.name), data = droplevels(df.mature)), silent = T)
if(!class(mod.mature) %in% "try-error"){
drop1.result <- drop1(mod.mature, k = log(nrow(df.mature)))
biome.significant <- drop1.result$AIC[2] > drop1.result$AIC[1]
if(biome.significant) { # do pairwise comparison
pairwise.comp <- glht(mod.mature, linfct = mcp(Biome = "Tukey"))
pairwise.comp.letter.grouping <- cld(pairwise.comp)
}
}
# NOTE(review): if mod.mature errors on the first iteration,
# `biome.significant` is never defined and the `if` further below will fail;
# it may also carry a stale value from a previous iteration -- confirm.
### plot
if(save.plot) tiff(file = paste0("figures/age_trends/", response.v, ".tiff"), height = 800, width = 1000, units = "px", res = 150)
### layout figure: map on top, young scatter + mature boxplot below
layout(matrix(c(1,1,2,3), ncol = 2, byrow = T), heights = c(1,2), widths = c(5,1))
### MAP plot all sites ? (even mature?)
par(mar = c(0,0,0,0))
plot(Continents, col = "grey", border = "grey")
sites <- df.young[, c("lat", "lon", "Biome")]
coordinates(sites) <- c("lon", "lat")
points(sites, col = color.biome[df.young$Biome], pch = 4)   # young = crosses
sites <- df.mature[, c("lat", "lon", "Biome")]
coordinates(sites) <- c("lon", "lat")
points(sites, col = color.biome[df.mature$Biome], pch = 1)  # mature = circles
### Plot young: observations + fixed-effect fit per biome (log age axis)
par(mar = c(5.1,4.1,0,0))
plot(mean ~ stand.age, data = df.young, col = color.biome[df.young$Biome], xlab = "Age (years - log scaled)", ylab = bquote(.(response.v) ~ " (Mg C " ~ ha^{-1}~")"), log = ifelse(right.skewed.response, "xy", "x"), xlim = c(0.999, 100), ylim = ylim, pch = 4, bty = "L", las = 1)
for(b in levels(df$Biome)){
y <- fit[newDat$Biome %in% b]
x <- newDat[newDat$Biome %in% b, ]$stand.age
# Dashed line when the age effect was not retained by the BIC test.
lines(y ~ x, col = color.biome[levels(df$Biome) %in% b], lty = ifelse(age.significant, 1, 2))
}
mtext(side = 3, line = -1, adj = 0.03, text = paste("n =", nrow(df.young)), cex = 0.5)
## boxplot mature
par(mar = c(5.1,0,0,0))
boxplot(mean ~ Biome, data = droplevels(df.mature), ylim = ylim, axes = F, xlab = "Mature Forest", col = color.biome[as.factor(levels(df$Biome)) %in% df.mature$Biome], outcol =color.biome[as.factor(levels(df$Biome)) %in% df.mature$Biome], log = ifelse(right.skewed.response, "y", ""))
if(biome.significant & !class(mod.mature) %in% "try-error") { # do pairwise comparison
# Compact letter display above the boxes: biomes sharing a letter do not differ.
text(x = c(1:length(unique(droplevels(df.mature)$Biome))), y = max(df.mature$mean) + diff(ylim)/50, pairwise.comp.letter.grouping$mcletters$Letters)
}
mtext(side = 1, line = -1, adj = 0.03, text = paste("n =", nrow(df.mature)), cex = 0.5)
if(save.plot) dev.off()
}
# Figure 6 and 7 ERL-review####
for( fig in c("Figure6", "Figure7")) {
if(save.plot) tiff(file = paste0("figures/age_trends/for_ERL_review/", fig, ".tiff"), height = 1000, width = 1000, units = "px", res = 150)
### layout figure
nf <- layout(matrix(c(1,1,4,4,
2,3,5,6,
7,7,10,10,
8,9,11,12,
13,13,16,16,
14,15,17,18), ncol = 4, byrow = T), heights = rep(c(1,2),3), widths = rep(c(4,1),2))
if (fig %in% "Figure6") variables.of.interest <- c("GPP", "NPP", "ANPP", "R_soil", "R_eco", "NEE")
if (fig %in% "Figure7") variables.of.interest <- c("biomass_ag", "biomass_ag_foliage", "biomass_root_fine", "deadwood")
# For each variable of the current manuscript figure, draw a 3-plot panel:
# site map, young-forest age trend, mature-forest boxplot by biome.
for(response.v in variables.of.interest) {
print(response.v)
# right.skewed.response <- response.v %in% right.skewed_response.variables
### data
# split into young (< 100 yr, age > 0) and mature (>= 100 yr) stands
df <- ForC_simplified[ForC_simplified$variable.name %in% response.v, ]
df.mature <- df[df$stand.age >= 100, ]
df.young <- df[df$stand.age < 100 & df$stand.age != 0,] # removing 0 because we are taking the log. Removes 28 records
# log-scale y when the young-forest distribution is strongly right-skewed and strictly positive
right.skewed.response <- skewness(df.young$mean) > 2 & all(df.young$mean > 0)
### ylim
ylim = range(df$mean)
### model young
mod.young <- lmer(mean ~ log10(stand.age) + Biome + (1|geographic.area/plot.name), data = droplevels(df.young))
# drop1 with k = log(n) is a BIC-based test: age is "significant" when dropping it increases the criterion
drop1.result <- drop1(mod.young, k = log(nrow(df.young)))
age.significant <- drop1.result$AIC[2] > drop1.result$AIC[1]
at.least.10.different.ages.in.each.Biome <- all(tapply(droplevels(df.young)$stand.age, droplevels(df.young)$Biome, function(x) length(x)>=10))
# upgrade to an age x Biome interaction only when age matters and every biome has >= 10 young records
if(age.significant & at.least.10.different.ages.in.each.Biome) mod.young <- lmer(mean ~ log10(stand.age) * Biome + (1|geographic.area/plot.name), data = droplevels(df.young))
# mod.without.age <- lmer(mean ~ Biome + (1|geographic.area/plot.name), data = droplevels(df.young))
# age.significant <- anova(mod.without.age, mod)$"Pr(>Chisq)"[2] < 0.05
# prediction grid: 100 ages evenly spaced on the log scale per biome;
# re.form = NA gives fixed-effects-only (population-level) predictions
newDat <- expand.grid(stand.age = 10^seq(min(log10(df.young$stand.age))+0.01, max(log10(df.young$stand.age)), length.out = 100), Biome = levels(droplevels(df.young$Biome)))
fit <- predict(mod.young, newDat, re.form = NA)
### model mature
# try() because the fit can fail (e.g. too few mature records)
mod.mature <- try(lmer(mean ~ Biome + (1|geographic.area/plot.name), data = droplevels(df.mature)), silent = T)
if(!class(mod.mature) %in% "try-error"){
drop1.result <- drop1(mod.mature, k = log(nrow(df.mature)))
biome.significant <- drop1.result$AIC[2] > drop1.result$AIC[1]
if(biome.significant) { # do pairwise comparison
pairwise.comp <- glht(mod.mature, linfct = mcp(Biome = "Tukey"))
pairwise.comp.letter.grouping <- cld(pairwise.comp)
}
}
### plot
### layout figure
# layout(matrix(c(1,1,2,3), ncol = 2, byrow = T), heights = c(1,2), widths = c(5,1))
### MAP plot all sites ? (even mature?)
par(mar = c(0,0,0,0))
plot(Continents, col = "grey", border = "grey")
# young sites as crosses (pch 4), mature sites as circles (pch 1)
sites <- df.young[, c("lat", "lon", "Biome")]
coordinates(sites) <- c("lon", "lat")
points(sites, col = color.biome[df.young$Biome], pch = 4)
sites <- df.mature[, c("lat", "lon", "Biome")]
coordinates(sites) <- c("lon", "lat")
points(sites, col = color.biome[df.mature$Biome], pch = 1)
# panel letter, e.g. "a)", based on the variable's position in the figure
mtext(side = 3, line = -1, adj = 0.05, text = paste0(letters[which(variables.of.interest %in% response.v)], ")"), cex = 0.8)
### Plot young
par(mar = c(5.1,4.1,0,0))
plot(mean ~ stand.age, data = df.young, col = color.biome[df.young$Biome], xlab = "Age (years - log scaled)", ylab = "", log = ifelse(right.skewed.response, "xy", "x"), xlim = c(0.999, 100), ylim = ylim, pch = 4, bty = "L", las = 1, yaxt = "n")
if(!right.skewed.response) axis(2, las = 1)
if(right.skewed.response) axis(2, at = c(0.1, 1, 10, 100, 1000), labels = c("0.1", "1", "10", expression(10^{2}), expression(10^{3})), las = 1) # dput(paste0("expression(10^{", seq(-1, 3), "})"))
# fitted age trend per biome; dashed when the age effect is not significant
for(b in levels(df$Biome)){
y <- fit[newDat$Biome %in% b]
x <- newDat[newDat$Biome %in% b, ]$stand.age
lines(y ~ x, col = color.biome[levels(df$Biome) %in% b], lty = ifelse(age.significant, 1, 2))
}
mtext(side = 2, bquote(.(response.v) ~ " (Mg C " ~ ha^{-1}~")"), cex = 0.65, line = 2)
mtext(side = 3, line = -1, adj = 0.03, text = paste("n =", nrow(df.young)), cex = 0.5)
## boxplot mature
par(mar = c(5.1,0,0,0))
boxplot(mean ~ Biome, data = droplevels(df.mature), ylim = ylim, axes = F, xlab = "Mature", col = color.biome[as.factor(levels(df$Biome)) %in% df.mature$Biome], outcol =color.biome[as.factor(levels(df$Biome)) %in% df.mature$Biome], log = ifelse(right.skewed.response, "y", ""))
# NOTE(review): biome.significant is only assigned inside the !"try-error"
# branch above; if mod.mature failed, this condition reads a stale value from a
# previous iteration (or errors on the first). Safer: initialize
# biome.significant <- FALSE each iteration and test the try-error first with &&.
if(biome.significant & !class(mod.mature) %in% "try-error") { # do pairwise comparison
text(x = c(1:length(unique(droplevels(df.mature)$Biome))), y = max(df.mature$mean) + diff(ylim)/70, pairwise.comp.letter.grouping$mcletters$Letters)
}
mtext(side = 1, line = 0, adj = 1, text = paste("n =", nrow(df.mature)), cex = 0.5)
Sys.sleep(time = 1) # pause so the device finishes drawing R_eco before NEE starts; without it NEE was taking R_eco's panel
} # for(response.v in variables.of.interest)
if(save.plot) dev.off()
} # for( fig in c("Figure6", "Figure7"))
|
/ForC-master/scripts/Figures/Age_trend_in_C_cycle_variables.R
|
no_license
|
mingkaijiang/ForC_analysis
|
R
| false
| false
| 13,849
|
r
|
######################################################
# Purpose: Plot age trends in C cycle variables
# Inputs:
# - ForC_simplified table
# - Map of the world (in supplementary resources)
# Outputs:
# -
# Developed by: Valentine Herrmann - HerrmannV@si.edu in April 2018
# R version 3.4.4 (2018-03-15)
######################################################
# Clean environment ####
# NOTE(review): rm(list = ls()) wipes the user's entire workspace when the
# script is sourced interactively; prefer running in a fresh R session instead.
rm(list = ls())
# Set working directory as ForC main folder ####
# NOTE(review): setwd(".") is a no-op; the script assumes it is launched from
# the ForC main folder (all relative paths below depend on this).
setwd(".")
# Load libaries ####
library(lme4)
library(multcomp)
library(lsmeans)
library(moments)
library(sp)
library(rgdal)
library(raster)
# Load data ####
# ForC simplified measurement table (one row per record)
ForC_simplified <- read.csv("ForC_simplified/ForC_simplified.csv", stringsAsFactors = F)
# World continents polygons used as the map background
# NOTE(review): readOGR() comes from the retired rgdal package; consider
# migrating to sf::st_read() — TODO confirm downstream sp compatibility.
Continents <- readOGR("supplementary_resources/World Map data/Continents/World_Continents.shp")
# Codes used in ForC to flag missing data in addition to a true NA
na_codes <- c("NA", "NI", "NRA", "NaN", "NAC")
# TRUE wherever x is NA or one of the ForC missing-data codes
my_is.na <- function(x) {
  is.na(x) | x %in% na_codes
}
# Drop the elements of x that are missing according to my_is.na()
my_na.omit <- function(x) {
  x[!my_is.na(x)]
}
# set parameters ####
# Write figures to TIFF files (TRUE) or draw on the active device (FALSE)?
# Fixed: use `<-` for assignment and the full literal FALSE — the shorthand
# `F` is an ordinary variable that can be reassigned, so `save.plot = F` is fragile.
save.plot <- FALSE
# Prepare data ####
## Filter out managed, disturbed and no history info sites
ForC_simplified <- ForC_simplified[ForC_simplified$managed %in% 0 & ForC_simplified$disturbed %in% 0 & ForC_simplified$history.no.info %in%0, ]
## Make stand.age a numeric variable
ForC_simplified$stand.age <- as.numeric(ForC_simplified$stand.age)
## make geographic area and plot.name factors
## (addNA keeps missing values as an explicit factor level so they are not
## silently dropped from the random-effect grouping used later)
ForC_simplified$geographic.area <- addNA(ForC_simplified$geographic.area)
ForC_simplified$plot.name <- addNA(ForC_simplified$plot.name)
## Prepare Forest Biomes
### Koeppen region
### A* -> Tropical; C*, D.a, D.b -> Temperate; D.c, D.d -> Boreal; rest -> Other
KOEPPEN <- ifelse(grepl("^A", ForC_simplified$Koeppen), "Tropical",
ifelse(grepl("(^C)|(^D.a$)|(^D.b$)", ForC_simplified$Koeppen), "Temperate",
ifelse(grepl("(^D.c$)|(^D.d$)", ForC_simplified$Koeppen), "Boreal", "Other")))
### Broadleaf vs Conifer (based on ForC dominant vegetation codes)
broadleaf_codes <- c("2TEB", "2TDB", "2TB")
conifer_codes <- c("2TEN", "2TDN", "2TN")
Leaf_Trait <- ifelse(ForC_simplified$dominant.veg %in% broadleaf_codes, "broadleaf",
ifelse(ForC_simplified$dominant.veg %in% conifer_codes, "conifer", "Other"))
### combine to get tropical, temperate_broadleaf, temperate_conifer and boreal
Biome <- paste(KOEPPEN, Leaf_Trait)
table(Biome)
ForC_simplified$Biome <- factor(ifelse(grepl("Boreal", Biome), "Boreal",
ifelse(grepl("Tropical", Biome), "Tropical",
ifelse(Biome %in% "Temperate broadleaf", Biome,
ifelse(Biome %in% "Temperate conifer", Biome, "Other")))))
table(ForC_simplified$Biome)
# Remove Biome Other
ForC_simplified <- droplevels(ForC_simplified[!ForC_simplified$Biome %in% "Other", ])
# order Biomes correctly
ForC_simplified$Biome <- factor(ForC_simplified$Biome, levels = c("Tropical", "Temperate broadleaf", "Temperate conifer", "Boreal"))
# Remove rows with no Age
ForC_simplified <- droplevels(ForC_simplified[!is.na(ForC_simplified$stand.age), ])
## prepare color for biomes
## (positions match the Biome factor levels set above:
##  Tropical = red, Temperate broadleaf = green, Temperate conifer = blue, Boreal = cyan2)
levels(ForC_simplified$Biome)
color.biome <- c( "red", "green", "blue", "cyan2")
## prepare map
## crop the continents layer to the latitude band where sites occur
Continents <- crop(Continents, extent(-180, 180, -43, 73))
## prepare variables that need to be grouped
## collapse numbered sub-variables into a single variable name
ForC_simplified[ForC_simplified$variable.name %in% c("NPP_1", "NPP_2", "NPP_3", "NPP_4", "NPP_5"),]$variable.name <- "NPP"
ForC_simplified[ForC_simplified$variable.name %in% c("ANPP_0", "ANPP_1", "ANPP_2"),]$variable.name <- "ANPP"
ForC_simplified[ForC_simplified$variable.name %in% c("ANPP_litterfall_1", "ANPP_litterfall_2", "ANPP_litterfall_0"),]$variable.name <- "ANPP_litterfall"
#### multiply NEP by -1 and consider it as NEE (flip to the NEE sign convention)
ForC_simplified[ForC_simplified $variable.name %in% c("NEP"),]$mean <- -ForC_simplified[ForC_simplified $variable.name %in% c("NEP"),]$mean
ForC_simplified[ForC_simplified $variable.name %in% c("NEP"),]$variable.name <- "NEE"
## Prepare list of variables to use (those that have at least 30 records in young forest)
response.variables <- names(which(table(ForC_simplified[ForC_simplified$stand.age < 100 & ForC_simplified$stand.age !=0, ]$variable.name)>= 30))
response.variables <- response.variables[!response.variables %in% c("NPP_understory", "total.ecosystem", "soil")]
# Run analysis + plot####
# For each response variable with >= 30 young-forest records:
#   1. split records into young (< 100 yr, age > 0) and mature (>= 100 yr) stands;
#   2. fit lmer(mean ~ log10(age) + Biome) for young stands with a BIC-based
#      drop1 test of the age effect, upgrading to an age x Biome interaction
#      when age is significant and every biome has >= 10 young records;
#   3. fit lmer(mean ~ Biome) for mature stands and, when Biome is significant,
#      compute Tukey letter groupings;
#   4. draw a 3-panel figure: site map (top), young age-trend scatter plus
#      fitted lines (bottom left), mature-forest boxplot by biome (bottom right).
for(response.v in response.variables) {
print(response.v)
# right.skewed.response <- response.v %in% right.skewed_response.variables
### data
df <- ForC_simplified[ForC_simplified$variable.name %in% response.v, ]
df.mature <- df[df$stand.age >= 100, ]
df.young <- df[df$stand.age < 100 & df$stand.age != 0,] # removing 0 because we are taking the log. Removes 28 records
# log-scale y when the young-forest distribution is strongly right-skewed and strictly positive
right.skewed.response <- skewness(df.young$mean) > 2 & all(df.young$mean > 0)
### ylim
ylim = range(df$mean)
### model young
mod.young <- lmer(mean ~ log10(stand.age) + Biome + (1|geographic.area/plot.name), data = droplevels(df.young))
# drop1 with k = log(n) is a BIC-based test: age is "significant" when dropping it increases the criterion
drop1.result <- drop1(mod.young, k = log(nrow(df.young)))
age.significant <- drop1.result$AIC[2] > drop1.result$AIC[1]
at.least.10.different.ages.in.each.Biome <- all(tapply(droplevels(df.young)$stand.age, droplevels(df.young)$Biome, function(x) length(x)>=10))
if(age.significant & at.least.10.different.ages.in.each.Biome) mod.young <- lmer(mean ~ log10(stand.age) * Biome + (1|geographic.area/plot.name), data = droplevels(df.young))
# mod.without.age <- lmer(mean ~ Biome + (1|geographic.area/plot.name), data = droplevels(df.young))
# age.significant <- anova(mod.without.age, mod)$"Pr(>Chisq)"[2] < 0.05
# prediction grid: 100 ages evenly spaced on the log scale per biome;
# re.form = NA gives fixed-effects-only (population-level) predictions
newDat <- expand.grid(stand.age = 10^seq(min(log10(df.young$stand.age))+0.01, max(log10(df.young$stand.age)), length.out = 100), Biome = levels(droplevels(df.young$Biome)))
fit <- predict(mod.young, newDat, re.form = NA)
### model mature
# try() because the fit can fail (e.g. too few mature records)
mod.mature <- try(lmer(mean ~ Biome + (1|geographic.area/plot.name), data = droplevels(df.mature)), silent = T)
if(!class(mod.mature) %in% "try-error"){
drop1.result <- drop1(mod.mature, k = log(nrow(df.mature)))
biome.significant <- drop1.result$AIC[2] > drop1.result$AIC[1]
if(biome.significant) { # do pairwise comparison
pairwise.comp <- glht(mod.mature, linfct = mcp(Biome = "Tukey"))
pairwise.comp.letter.grouping <- cld(pairwise.comp)
}
}
### plot
if(save.plot) tiff(file = paste0("figures/age_trends/", response.v, ".tiff"), height = 800, width = 1000, units = "px", res = 150)
### layout figure
layout(matrix(c(1,1,2,3), ncol = 2, byrow = T), heights = c(1,2), widths = c(5,1))
### MAP plot all sites ? (even mature?)
par(mar = c(0,0,0,0))
plot(Continents, col = "grey", border = "grey")
# young sites as crosses (pch 4), mature sites as circles (pch 1)
sites <- df.young[, c("lat", "lon", "Biome")]
coordinates(sites) <- c("lon", "lat")
points(sites, col = color.biome[df.young$Biome], pch = 4)
sites <- df.mature[, c("lat", "lon", "Biome")]
coordinates(sites) <- c("lon", "lat")
points(sites, col = color.biome[df.mature$Biome], pch = 1)
### Plot young
par(mar = c(5.1,4.1,0,0))
plot(mean ~ stand.age, data = df.young, col = color.biome[df.young$Biome], xlab = "Age (years - log scaled)", ylab = bquote(.(response.v) ~ " (Mg C " ~ ha^{-1}~")"), log = ifelse(right.skewed.response, "xy", "x"), xlim = c(0.999, 100), ylim = ylim, pch = 4, bty = "L", las = 1)
# fitted age trend per biome; dashed when the age effect is not significant
for(b in levels(df$Biome)){
y <- fit[newDat$Biome %in% b]
x <- newDat[newDat$Biome %in% b, ]$stand.age
lines(y ~ x, col = color.biome[levels(df$Biome) %in% b], lty = ifelse(age.significant, 1, 2))
}
mtext(side = 3, line = -1, adj = 0.03, text = paste("n =", nrow(df.young)), cex = 0.5)
## boxplot mature
par(mar = c(5.1,0,0,0))
boxplot(mean ~ Biome, data = droplevels(df.mature), ylim = ylim, axes = F, xlab = "Mature Forest", col = color.biome[as.factor(levels(df$Biome)) %in% df.mature$Biome], outcol =color.biome[as.factor(levels(df$Biome)) %in% df.mature$Biome], log = ifelse(right.skewed.response, "y", ""))
# NOTE(review): biome.significant is only assigned inside the !"try-error"
# branch above; if mod.mature failed, this condition reads a stale value from a
# previous iteration (or errors on the first). Safer: initialize
# biome.significant <- FALSE each iteration and test the try-error first with &&.
if(biome.significant & !class(mod.mature) %in% "try-error") { # do pairwise comparison
text(x = c(1:length(unique(droplevels(df.mature)$Biome))), y = max(df.mature$mean) + diff(ylim)/50, pairwise.comp.letter.grouping$mcletters$Letters)
}
mtext(side = 1, line = -1, adj = 0.03, text = paste("n =", nrow(df.mature)), cex = 0.5)
if(save.plot) dev.off()
}
# Figure 6 and 7 ERL-review####
# Same analysis as the loop above, but combining a fixed set of variables into
# one multi-panel figure per manuscript figure (Figure6: C fluxes; Figure7: C stocks).
for( fig in c("Figure6", "Figure7")) {
if(save.plot) tiff(file = paste0("figures/age_trends/for_ERL_review/", fig, ".tiff"), height = 1000, width = 1000, units = "px", res = 150)
### layout figure
# up to 6 variable panels, each a triplet of plots (map on top spanning two
# cells, then young trend + mature boxplot), arranged in a 2-column grid
nf <- layout(matrix(c(1,1,4,4,
2,3,5,6,
7,7,10,10,
8,9,11,12,
13,13,16,16,
14,15,17,18), ncol = 4, byrow = T), heights = rep(c(1,2),3), widths = rep(c(4,1),2))
if (fig %in% "Figure6") variables.of.interest <- c("GPP", "NPP", "ANPP", "R_soil", "R_eco", "NEE")
if (fig %in% "Figure7") variables.of.interest <- c("biomass_ag", "biomass_ag_foliage", "biomass_root_fine", "deadwood")
for(response.v in variables.of.interest) {
print(response.v)
# right.skewed.response <- response.v %in% right.skewed_response.variables
### data
# split into young (< 100 yr, age > 0) and mature (>= 100 yr) stands
df <- ForC_simplified[ForC_simplified$variable.name %in% response.v, ]
df.mature <- df[df$stand.age >= 100, ]
df.young <- df[df$stand.age < 100 & df$stand.age != 0,] # removing 0 because we are taking the log. Removes 28 records
# log-scale y when the young-forest distribution is strongly right-skewed and strictly positive
right.skewed.response <- skewness(df.young$mean) > 2 & all(df.young$mean > 0)
### ylim
ylim = range(df$mean)
### model young
mod.young <- lmer(mean ~ log10(stand.age) + Biome + (1|geographic.area/plot.name), data = droplevels(df.young))
# drop1 with k = log(n) is a BIC-based test: age is "significant" when dropping it increases the criterion
drop1.result <- drop1(mod.young, k = log(nrow(df.young)))
age.significant <- drop1.result$AIC[2] > drop1.result$AIC[1]
at.least.10.different.ages.in.each.Biome <- all(tapply(droplevels(df.young)$stand.age, droplevels(df.young)$Biome, function(x) length(x)>=10))
if(age.significant & at.least.10.different.ages.in.each.Biome) mod.young <- lmer(mean ~ log10(stand.age) * Biome + (1|geographic.area/plot.name), data = droplevels(df.young))
# mod.without.age <- lmer(mean ~ Biome + (1|geographic.area/plot.name), data = droplevels(df.young))
# age.significant <- anova(mod.without.age, mod)$"Pr(>Chisq)"[2] < 0.05
# prediction grid: 100 ages evenly spaced on the log scale per biome;
# re.form = NA gives fixed-effects-only (population-level) predictions
newDat <- expand.grid(stand.age = 10^seq(min(log10(df.young$stand.age))+0.01, max(log10(df.young$stand.age)), length.out = 100), Biome = levels(droplevels(df.young$Biome)))
fit <- predict(mod.young, newDat, re.form = NA)
### model mature
# try() because the fit can fail (e.g. too few mature records)
mod.mature <- try(lmer(mean ~ Biome + (1|geographic.area/plot.name), data = droplevels(df.mature)), silent = T)
if(!class(mod.mature) %in% "try-error"){
drop1.result <- drop1(mod.mature, k = log(nrow(df.mature)))
biome.significant <- drop1.result$AIC[2] > drop1.result$AIC[1]
if(biome.significant) { # do pairwise comparison
pairwise.comp <- glht(mod.mature, linfct = mcp(Biome = "Tukey"))
pairwise.comp.letter.grouping <- cld(pairwise.comp)
}
}
### plot
### layout figure
# layout(matrix(c(1,1,2,3), ncol = 2, byrow = T), heights = c(1,2), widths = c(5,1))
### MAP plot all sites ? (even mature?)
par(mar = c(0,0,0,0))
plot(Continents, col = "grey", border = "grey")
# young sites as crosses (pch 4), mature sites as circles (pch 1)
sites <- df.young[, c("lat", "lon", "Biome")]
coordinates(sites) <- c("lon", "lat")
points(sites, col = color.biome[df.young$Biome], pch = 4)
sites <- df.mature[, c("lat", "lon", "Biome")]
coordinates(sites) <- c("lon", "lat")
points(sites, col = color.biome[df.mature$Biome], pch = 1)
# panel letter, e.g. "a)", based on the variable's position in the figure
mtext(side = 3, line = -1, adj = 0.05, text = paste0(letters[which(variables.of.interest %in% response.v)], ")"), cex = 0.8)
### Plot young
par(mar = c(5.1,4.1,0,0))
plot(mean ~ stand.age, data = df.young, col = color.biome[df.young$Biome], xlab = "Age (years - log scaled)", ylab = "", log = ifelse(right.skewed.response, "xy", "x"), xlim = c(0.999, 100), ylim = ylim, pch = 4, bty = "L", las = 1, yaxt = "n")
if(!right.skewed.response) axis(2, las = 1)
if(right.skewed.response) axis(2, at = c(0.1, 1, 10, 100, 1000), labels = c("0.1", "1", "10", expression(10^{2}), expression(10^{3})), las = 1) # dput(paste0("expression(10^{", seq(-1, 3), "})"))
# fitted age trend per biome; dashed when the age effect is not significant
for(b in levels(df$Biome)){
y <- fit[newDat$Biome %in% b]
x <- newDat[newDat$Biome %in% b, ]$stand.age
lines(y ~ x, col = color.biome[levels(df$Biome) %in% b], lty = ifelse(age.significant, 1, 2))
}
mtext(side = 2, bquote(.(response.v) ~ " (Mg C " ~ ha^{-1}~")"), cex = 0.65, line = 2)
mtext(side = 3, line = -1, adj = 0.03, text = paste("n =", nrow(df.young)), cex = 0.5)
## boxplot mature
par(mar = c(5.1,0,0,0))
boxplot(mean ~ Biome, data = droplevels(df.mature), ylim = ylim, axes = F, xlab = "Mature", col = color.biome[as.factor(levels(df$Biome)) %in% df.mature$Biome], outcol =color.biome[as.factor(levels(df$Biome)) %in% df.mature$Biome], log = ifelse(right.skewed.response, "y", ""))
# NOTE(review): biome.significant is only assigned inside the !"try-error"
# branch above; if mod.mature failed, this condition reads a stale value from a
# previous iteration (or errors on the first). Safer: initialize
# biome.significant <- FALSE each iteration and test the try-error first with &&.
if(biome.significant & !class(mod.mature) %in% "try-error") { # do pairwise comparison
text(x = c(1:length(unique(droplevels(df.mature)$Biome))), y = max(df.mature$mean) + diff(ylim)/70, pairwise.comp.letter.grouping$mcletters$Letters)
}
mtext(side = 1, line = 0, adj = 1, text = paste("n =", nrow(df.mature)), cex = 0.5)
Sys.sleep(time = 1) # pause so the device finishes drawing R_eco before NEE starts; without it NEE was taking R_eco's panel
} # for(response.v in variables.of.interest)
if(save.plot) dev.off()
} # for( fig in c("Figure6", "Figure7"))
|
# -------------------------------------------------------------------------------
# This file is part of Ranger.
#
# Ranger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ranger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ranger. If not, see <http://www.gnu.org/licenses/>.
#
# Written by:
#
# Marvin N. Wright
# Institut fuer Medizinische Biometrie und Statistik
# Universitaet zu Luebeck
# Ratzeburger Allee 160
# 23562 Luebeck
# Germany
#
# http://www.imbs-luebeck.de
# -------------------------------------------------------------------------------
##' Ranger is a fast implementation of random forests (Breiman 2001) or recursive partitioning, particularly suited for high dimensional data.
##' Classification, regression, and survival forests are supported.
##' Classification and regression forests are implemented as in the original Random Forest (Breiman 2001), survival forests as in Random Survival Forests (Ishwaran et al. 2008).
##' Includes implementations of extremely randomized trees (Geurts et al. 2006) and quantile regression forests (Meinshausen 2006).
##'
##' The tree type is determined by the type of the dependent variable.
##' For factors classification trees are grown, for numeric values regression trees and for survival objects survival trees.
##' The Gini index is used as default splitting rule for classification.
##' For regression, the estimated response variances or maximally selected rank statistics (Wright et al. 2016) can be used.
##' For Survival the log-rank test, a C-index based splitting rule (Schmid et al. 2015) and maximally selected rank statistics (Wright et al. 2016) are available.
##' For all tree types, forests of extremely randomized trees (Geurts et al. 2006) can be grown.
##'
##' With the \code{probability} option and factor dependent variable a probability forest is grown.
##' Here, the node impurity is used for splitting, as in classification forests.
##' Predictions are class probabilities for each sample.
##' In contrast to other implementations, each tree returns a probability estimate and these estimates are averaged for the forest probability estimate.
##' For details see Malley et al. (2012).
##'
##' Note that for classification and regression nodes with size smaller than \code{min.node.size} can occur, as in original Random Forests.
##' For survival all nodes contain at least \code{min.node.size} samples.
##' Variables selected with \code{always.split.variables} are tried additionally to the mtry variables randomly selected.
##' In \code{split.select.weights}, weights do not need to sum up to 1, they will be normalized later.
##' The weights are assigned to the variables in the order they appear in the formula or in the data if no formula is used.
##' Names of the \code{split.select.weights} vector are ignored.
##' The usage of \code{split.select.weights} can increase the computation times for large forests.
##'
##' Unordered factor covariates can be handled in 3 different ways by using \code{respect.unordered.factors}:
##' For 'ignore' all factors are regarded ordered, for 'partition' all possible 2-partitions are considered for splitting.
##' For 'order' and 2-class classification the factor levels are ordered by their proportion falling in the second class, for regression by their mean response, as described in Hastie et al. (2009), chapter 9.2.4.
##' For multiclass classification the factor levels are ordered by the first principal component of the weighted covariance matrix of the contingency table (Coppersmith et al. 1999), for survival by the median survival (or the largest available quantile if the median is not available).
##' The use of 'order' is recommended, as it is computationally fast and can handle an unlimited number of factor levels.
##' Note that the factors are only reordered once and not again in each split.
##'
##' The 'impurity_corrected' importance measure is unbiased in terms of the number of categories and category frequencies and is almost as fast as the standard impurity importance.
##' It is a modified version of the method by Sandri & Zuccolotto (2008), which is faster and more memory efficient.
##' See Nembrini et al. (2018) for details.
##' This importance measure can be combined with the methods to estimate p-values in \code{\link{importance_pvalues}}.
##'
##' Regularization works by penalizing new variables by multiplying the splitting criterion by a factor, see Deng & Runger (2012) for details.
##' If \code{regularization.usedepth=TRUE}, \eqn{f^d} is used, where \emph{f} is the regularization factor and \emph{d} the depth of the node.
##' If regularization is used, multithreading is deactivated because all trees need access to the list of variables that are already included in the model.
##'
##' For a large number of variables and data frames as input data the formula interface can be slow or impossible to use.
##' Alternatively \code{dependent.variable.name} (and \code{status.variable.name} for survival) or \code{x} and \code{y} can be used.
##' Use \code{x} and \code{y} with a matrix for \code{x} to avoid conversions and save memory.
##' Consider setting \code{save.memory = TRUE} if you encounter memory problems for very large datasets, but be aware that this option slows down the tree growing.
##'
##' For GWAS data consider combining \code{ranger} with the \code{GenABEL} package.
##' See the Examples section below for a demonstration using \code{Plink} data.
##' All SNPs in the \code{GenABEL} object will be used for splitting.
##' To use only the SNPs without sex or other covariates from the phenotype file, use \code{0} on the right hand side of the formula.
##' Note that missing values are treated as an extra category while splitting.
##'
##' See \url{https://github.com/imbs-hl/ranger} for the development version.
##'
##' With recent R versions, multithreading on Windows platforms should just work.
##' If you compile yourself, the new RTools toolchain is required.
##'
##' @title Ranger
##' @param formula Object of class \code{formula} or \code{character} describing the model to fit. Interaction terms supported only for numerical variables.
##' @param data Training data of class \code{data.frame}, \code{matrix}, \code{dgCMatrix} (Matrix) or \code{gwaa.data} (GenABEL).
##' @param num.trees Number of trees.
##' @param mtry Number of variables to possibly split at in each node. Default is the (rounded down) square root of the number of variables. Alternatively, a single argument function returning an integer, given the number of independent variables.
##' @param importance Variable importance mode, one of 'none', 'impurity', 'impurity_corrected', 'permutation'. The 'impurity' measure is the Gini index for classification, the variance of the responses for regression and the sum of test statistics (see \code{splitrule}) for survival.
##' @param write.forest Save \code{ranger.forest} object, required for prediction. Set to \code{FALSE} to reduce memory usage if no prediction intended.
##' @param probability Grow a probability forest as in Malley et al. (2012).
##' @param min.node.size Minimal node size. Default 1 for classification, 5 for regression, 3 for survival, and 10 for probability.
##' @param max.depth Maximal tree depth. A value of NULL or 0 (the default) corresponds to unlimited depth, 1 to tree stumps (1 split per tree).
##' @param replace Sample with replacement.
##' @param sample.fraction Fraction of observations to sample. Default is 1 for sampling with replacement and 0.632 for sampling without replacement. For classification, this can be a vector of class-specific values.
##' @param case.weights Weights for sampling of training observations. Observations with larger weights will be selected with higher probability in the bootstrap (or subsampled) samples for the trees.
##' @param class.weights Weights for the outcome classes (in order of the factor levels) in the splitting rule (cost sensitive learning). Classification and probability prediction only. For classification the weights are also applied in the majority vote in terminal nodes.
##' @param splitrule Splitting rule. For classification and probability estimation "gini", "extratrees" or "hellinger" with default "gini". For regression "variance", "extratrees", "maxstat" or "beta" with default "variance". For survival "logrank", "extratrees", "C" or "maxstat" with default "logrank".
##' @param num.random.splits For "extratrees" splitrule: Number of random splits to consider for each candidate splitting variable.
##' @param alpha For "maxstat" splitrule: Significance threshold to allow splitting.
##' @param minprop For "maxstat" splitrule: Lower quantile of covariate distribution to be considered for splitting.
##' @param split.select.weights Numeric vector with weights between 0 and 1, representing the probability to select variables for splitting. Alternatively, a list of size num.trees, containing split select weight vectors for each tree can be used.
##' @param always.split.variables Character vector with variable names to be always selected in addition to the \code{mtry} variables tried for splitting.
##' @param respect.unordered.factors Handling of unordered factor covariates. One of 'ignore', 'order' and 'partition'. For the "extratrees" splitrule the default is "partition" for all other splitrules 'ignore'. Alternatively TRUE (='order') or FALSE (='ignore') can be used. See below for details.
##' @param scale.permutation.importance Scale permutation importance by standard error as in (Breiman 2001). Only applicable if permutation variable importance mode selected.
##' @param regularization.factor Regularization factor (gain penalization), either a vector of length p or one value for all variables.
##' @param regularization.usedepth Consider the depth in regularization.
##' @param local.importance Calculate and return local importance values as in (Breiman 2001). Only applicable if \code{importance} is set to 'permutation'.
##' @param keep.inbag Save how often observations are in-bag in each tree.
##' @param inbag Manually set observations per tree. List of size num.trees, containing inbag counts for each observation. Can be used for stratified sampling.
##' @param holdout Hold-out mode. Hold-out all samples with case weight 0 and use these for variable importance and prediction error.
##' @param quantreg Prepare quantile prediction as in quantile regression forests (Meinshausen 2006). Regression only. Set \code{keep.inbag = TRUE} to prepare out-of-bag quantile prediction.
##' @param oob.error Compute OOB prediction error. Set to \code{FALSE} to save computation time, e.g. for large survival forests.
##' @param num.threads Number of threads. Default is number of CPUs available.
##' @param save.memory Use memory saving (but slower) splitting mode. No effect for survival and GWAS data. Warning: This option slows down the tree growing, use only if you encounter memory problems.
##' @param verbose Show computation status and estimated runtime.
##' @param seed Random seed. Default is \code{NULL}, which generates the seed from \code{R}. Set to \code{0} to ignore the \code{R} seed.
##' @param dependent.variable.name Name of dependent variable, needed if no formula given. For survival forests this is the time variable.
##' @param status.variable.name Name of status variable, only applicable to survival data and needed if no formula given. Use 1 for event and 0 for censoring.
##' @param classification Set to \code{TRUE} to grow a classification forest. Only needed if the data is a matrix or the response numeric.
##' @param x Predictor data (independent variables), alternative interface to data with formula or dependent.variable.name.
##' @param y Response vector (dependent variable), alternative interface to data with formula or dependent.variable.name. For survival use a \code{Surv()} object or a matrix with time and status.
##' @return Object of class \code{ranger} with elements
##' \item{\code{forest}}{Saved forest (If write.forest set to TRUE). Note that the variable IDs in the \code{split.varIDs} object do not necessarily represent the column number in R.}
##' \item{\code{predictions}}{Predicted classes/values, based on out of bag samples (classification and regression only).}
##' \item{\code{variable.importance}}{Variable importance for each independent variable.}
##' \item{\code{variable.importance.local}}{Variable importance for each independent variable and each sample, if \code{local.importance} is set to TRUE and \code{importance} is set to 'permutation'.}
##' \item{\code{prediction.error}}{Overall out of bag prediction error. For classification this is the fraction of missclassified samples, for probability estimation the Brier score, for regression the mean squared error and for survival one minus Harrell's C-index.}
##' \item{\code{r.squared}}{R squared. Also called explained variance or coefficient of determination (regression only). Computed on out of bag data.}
##' \item{\code{confusion.matrix}}{Contingency table for classes and predictions based on out of bag samples (classification only).}
##' \item{\code{unique.death.times}}{Unique death times (survival only).}
##' \item{\code{chf}}{Estimated cumulative hazard function for each sample (survival only).}
##' \item{\code{survival}}{Estimated survival function for each sample (survival only).}
##' \item{\code{call}}{Function call.}
##' \item{\code{num.trees}}{Number of trees.}
##' \item{\code{num.independent.variables}}{Number of independent variables.}
##' \item{\code{mtry}}{Value of mtry used.}
##' \item{\code{min.node.size}}{Value of minimal node size used.}
##' \item{\code{treetype}}{Type of forest/tree. classification, regression or survival.}
##' \item{\code{importance.mode}}{Importance mode used.}
##' \item{\code{num.samples}}{Number of samples.}
##' \item{\code{inbag.counts}}{Number of times the observations are in-bag in the trees.}
##' @examples
##' ## Classification forest with default settings
##' ranger(Species ~ ., data = iris)
##'
##' ## Prediction
##' train.idx <- sample(nrow(iris), 2/3 * nrow(iris))
##' iris.train <- iris[train.idx, ]
##' iris.test <- iris[-train.idx, ]
##' rg.iris <- ranger(Species ~ ., data = iris.train)
##' pred.iris <- predict(rg.iris, data = iris.test)
##' table(iris.test$Species, pred.iris$predictions)
##'
##' ## Quantile regression forest
##' rf <- ranger(mpg ~ ., mtcars[1:26, ], quantreg = TRUE)
##' pred <- predict(rf, mtcars[27:32, ], type = "quantiles")
##' pred$predictions
##'
##' ## Variable importance
##' rg.iris <- ranger(Species ~ ., data = iris, importance = "impurity")
##' rg.iris$variable.importance
##'
##' ## Survival forest
##' require(survival)
##' rg.veteran <- ranger(Surv(time, status) ~ ., data = veteran)
##' plot(rg.veteran$unique.death.times, rg.veteran$survival[1,])
##'
##' ## Alternative interfaces (same results)
##' ranger(dependent.variable.name = "Species", data = iris)
##' ranger(y = iris[, 5], x = iris[, -5])
##'
##' \dontrun{
##' ## Use GenABEL interface to read Plink data into R and grow a classification forest
##' ## The ped and map files are not included
##' library(GenABEL)
##' convert.snp.ped("data.ped", "data.map", "data.raw")
##' dat.gwaa <- load.gwaa.data("data.pheno", "data.raw")
##' phdata(dat.gwaa)$trait <- factor(phdata(dat.gwaa)$trait)
##' ranger(trait ~ ., data = dat.gwaa)
##' }
##'
##' @author Marvin N. Wright
##' @references
##' \itemize{
##' \item Wright, M. N. & Ziegler, A. (2017). ranger: A fast implementation of random forests for high dimensional data in C++ and R. J Stat Softw 77:1-17. \url{https://doi.org/10.18637/jss.v077.i01}.
##' \item Schmid, M., Wright, M. N. & Ziegler, A. (2016). On the use of Harrell's C for clinical risk prediction via random survival forests. Expert Syst Appl 63:450-459. \url{https://doi.org/10.1016/j.eswa.2016.07.018}.
##' \item Wright, M. N., Dankowski, T. & Ziegler, A. (2017). Unbiased split variable selection for random survival forests using maximally selected rank statistics. Stat Med 36:1272-1284. \url{https://doi.org/10.1002/sim.7212}.
##' \item Nembrini, S., Koenig, I. R. & Wright, M. N. (2018). The revival of the Gini Importance? Bioinformatics. \url{https://doi.org/10.1093/bioinformatics/bty373}.
##' \item Breiman, L. (2001). Random forests. Mach Learn, 45:5-32. \url{https://doi.org/10.1023/A:1010933404324}.
##' \item Ishwaran, H., Kogalur, U. B., Blackstone, E. H., & Lauer, M. S. (2008). Random survival forests. Ann Appl Stat 2:841-860. \url{https://doi.org/10.1097/JTO.0b013e318233d835}.
##' \item Malley, J. D., Kruppa, J., Dasgupta, A., Malley, K. G., & Ziegler, A. (2012). Probability machines: consistent probability estimation using nonparametric learning machines. Methods Inf Med 51:74-81. \url{https://doi.org/10.3414/ME00-01-0052}.
##' \item Hastie, T., Tibshirani, R., Friedman, J. (2009). The Elements of Statistical Learning. Springer, New York. 2nd edition.
##' \item Geurts, P., Ernst, D., Wehenkel, L. (2006). Extremely randomized trees. Mach Learn 63:3-42. \url{https://doi.org/10.1007/s10994-006-6226-1}.
##' \item Meinshausen (2006). Quantile Regression Forests. J Mach Learn Res 7:983-999. \url{http://www.jmlr.org/papers/v7/meinshausen06a.html}.
##' \item Sandri, M. & Zuccolotto, P. (2008). A bias correction algorithm for the Gini variable importance measure in classification trees. J Comput Graph Stat, 17:611-628. \url{https://doi.org/10.1198/106186008X344522}.
##' \item Coppersmith D., Hong S. J., Hosking J. R. (1999). Partitioning nominal attributes in decision trees. Data Min Knowl Discov 3:197-217. \url{https://doi.org/10.1023/A:1009869804967}.
##' \item Deng & Runger (2012). Feature selection via regularized trees. The 2012 International Joint Conference on Neural Networks (IJCNN), Brisbane, Australia. \url{https://doi.org/10.1109/IJCNN.2012.6252640}.
##' }
##' @seealso \code{\link{predict.ranger}}
##' @useDynLib ranger, .registration = TRUE
##' @importFrom Rcpp evalCpp
##' @import stats
##' @import utils
##' @importFrom Matrix Matrix
##' @export
ranger <- function(formula = NULL, data = NULL, num.trees = 500, mtry = NULL,
                   importance = "none", write.forest = TRUE, probability = FALSE,
                   min.node.size = NULL, max.depth = NULL, replace = TRUE,
                   sample.fraction = ifelse(replace, 1, 0.632),
                   case.weights = NULL, class.weights = NULL, splitrule = NULL,
                   num.random.splits = 1, alpha = 0.5, minprop = 0.1,
                   split.select.weights = NULL, always.split.variables = NULL,
                   respect.unordered.factors = NULL,
                   scale.permutation.importance = FALSE,
                   local.importance = FALSE,
                   regularization.factor = 1, regularization.usedepth = FALSE,
                   keep.inbag = FALSE, inbag = NULL, holdout = FALSE,
                   quantreg = FALSE, oob.error = TRUE,
                   num.threads = NULL, save.memory = FALSE,
                   verbose = TRUE, seed = NULL,
                   dependent.variable.name = NULL, status.variable.name = NULL,
                   classification = NULL, x = NULL, y = NULL) {

  ## By default not in GWAS mode.
  ## snp.data is passed to C++ even when unused; as.matrix(0) is the
  ## "no SNP data" placeholder expected by rangerCpp.
  snp.data <- as.matrix(0)
  gwa.mode <- FALSE

  ## Resolve the three alternative interfaces (formula, dependent.variable.name,
  ## x/y) into a common x (predictors) and y (response).
  if (is.null(data)) {
    ## x/y interface: both must be supplied when no data frame is given.
    if (is.null(x) | is.null(y)) {
      stop("Error: Either data or x and y is required.")
    }
  } else {
    ## GenABEL GWA data: split phenotype data frame from the raw SNP matrix.
    if (inherits(data, "gwaa.data" )) {
      snp.names <- data@gtdata@snpnames
      snp.data <- data@gtdata@gtps@.Data
      data <- data@phdata
      if ("id" %in% names(data)) {
        ## The subject id column is an identifier, not a covariate.
        data$"id" <- NULL
      }
      gwa.mode <- TRUE
      ## save.memory mode is incompatible with the GWA data layout.
      save.memory <- FALSE
    }

    ## Formula interface. Use whole data frame if no formula provided and depvarname given
    if (is.null(formula)) {
      if (is.null(dependent.variable.name)) {
        if (is.null(y) | is.null(x)) {
          stop("Error: Please give formula, dependent variable name or x/y.")
        }
      } else {
        if (is.null(status.variable.name)) {
          y <- data[, dependent.variable.name, drop = TRUE]
          x <- data[, !(colnames(data) %in% dependent.variable.name), drop = FALSE]
        } else {
          ## Survival: build a Surv object from the time and status columns.
          y <- survival::Surv(data[, dependent.variable.name], data[, status.variable.name])
          x <- data[, !(colnames(data) %in% c(dependent.variable.name, status.variable.name)), drop = FALSE]
        }
      }
    } else {
      formula <- formula(formula)
      if (!inherits(formula, "formula")) {
        stop("Error: Invalid formula.")
      }
      ## parse.formula is a package-internal helper; first column of the
      ## returned frame is the response, the rest are the predictors.
      data.selected <- parse.formula(formula, data, env = parent.frame())
      y <- data.selected[, 1]
      x <- data.selected[, -1, drop = FALSE]
    }
  }

  ## Sparse matrix data: only dgCMatrix is handled by the C++ backend, and
  ## only through the non-formula interfaces.
  if (inherits(x, "Matrix")) {
    if (!inherits(x, "dgCMatrix")) {
      stop("Error: Currently only sparse data of class 'dgCMatrix' supported.")
    }
    if (!is.null(formula)) {
      stop("Error: Sparse matrices only supported with alternative interface. Use dependent.variable.name or x/y instead of formula.")
    }
  }

  ## Check missing values: ranger does not handle NA in predictors or response.
  if (any(is.na(x))) {
    offending_columns <- colnames(x)[colSums(is.na(x)) > 0]
    stop("Missing data in columns: ",
         paste0(offending_columns, collapse = ", "), ".", call. = FALSE)
  }
  if (any(is.na(y))) {
    stop("Missing data in dependent variable.", call. = FALSE)
  }

  ## Check response levels: warn about (but keep) unused factor levels.
  if (is.factor(y)) {
    if (nlevels(y) != nlevels(droplevels(y))) {
      dropped_levels <- setdiff(levels(y), levels(droplevels(y)))
      warning("Dropped unused factor level(s) in dependent variable: ",
              paste0(dropped_levels, collapse = ", "), ".", call. = FALSE)
    }
  }

  ## Treetype codes passed to C++:
  ##   1 = classification, 3 = regression, 5 = survival, 9 = probability.
  ## Determined by the response type, overridable via classification/probability.
  if (is.factor(y) || is.logical(y)) {
    if (probability) {
      treetype <- 9
    } else {
      treetype <- 1
    }
  } else if (is.numeric(y) && (is.null(ncol(y)) || ncol(y) == 1)) {
    if (!is.null(classification) && classification && !probability) {
      treetype <- 1
    } else if (probability) {
      treetype <- 9
    } else {
      treetype <- 3
    }
  } else if (inherits(y, "Surv") || is.data.frame(y) || is.matrix(y)) {
    treetype <- 5
  } else {
    stop("Error: Unsupported type of dependent variable.")
  }

  ## Quantile prediction only for regression
  if (quantreg && treetype != 3) {
    stop("Error: Quantile prediction implemented only for regression outcomes.")
  }

  independent.variable.names <- colnames(x)

  ## respect.unordered.factors: default depends on the splitrule
  ## ('partition' for extratrees, otherwise 'ignore').
  if (is.null(respect.unordered.factors)) {
    if (!is.null(splitrule) && splitrule == "extratrees") {
      respect.unordered.factors <- "partition"
    } else {
      respect.unordered.factors <- "ignore"
    }
  }

  ## Old version of respect.unordered.factors: map legacy logical values
  ## onto the current character options.
  if (respect.unordered.factors == TRUE) {
    respect.unordered.factors <- "order"
  } else if (respect.unordered.factors == FALSE) {
    respect.unordered.factors <- "ignore"
  }

  ## Recode characters as factors and recode factors if 'order' mode.
  ## Matrix inputs are skipped: they cannot hold factor columns.
  if (!is.matrix(x) && !inherits(x, "Matrix") && ncol(x) > 0) {
    character.idx <- sapply(x, is.character)

    if (respect.unordered.factors == "order") {
      ## Recode characters and unordered factors
      ordered.idx <- sapply(x, is.ordered)
      factor.idx <- sapply(x, is.factor)
      recode.idx <- character.idx | (factor.idx & !ordered.idx)
      if (any(recode.idx) & (importance == "impurity_corrected" || importance == "impurity_unbiased")) {
        warning("Corrected impurity importance may not be unbiased for re-ordered factor levels. Consider setting respect.unordered.factors to 'ignore' or 'partition' or manually compute corrected importance.")
      }

      ## Numeric response used to order factor levels by mean response below.
      if (is.factor(y)) {
        num.y <- as.numeric(y)
      } else {
        num.y <- y
      }

      ## Recode each column: convert to factor, then reorder its levels by a
      ## response-dependent criterion and mark it ordered.
      x[recode.idx] <- lapply(x[recode.idx], function(xx) {
        if (!is.factor(xx)) {
          xx <- as.factor(xx)
        }

        if (length(levels(xx)) == 1) {
          ## Don't order if only one level
          levels.ordered <- levels(xx)
        } else if (inherits(y, "Surv")) {
          ## Use median survival if available or largest quantile available in all strata if median not available
          ## (largest.quantile is a package-internal helper.)
          levels.ordered <- largest.quantile(y ~ xx)

          ## Get all levels not in node
          levels.missing <- setdiff(levels(xx), levels.ordered)
          levels.ordered <- c(levels.missing, levels.ordered)
        } else if (is.factor(y) & nlevels(y) > 2) {
          ## Multiclass: order by first principal component of the weighted
          ## covariance of the contingency table (package-internal pca.order).
          levels.ordered <- pca.order(y = y, x = xx)
        } else {
          ## Order factor levels by mean response
          means <- sapply(levels(xx), function(y) {
            mean(num.y[xx == y])
          })
          levels.ordered <- as.character(levels(xx)[order(means)])
        }

        ## Return reordered factor
        factor(xx, levels = levels.ordered, ordered = TRUE, exclude = NULL)
      })

      ## Save levels so predict() can re-apply the same level ordering.
      covariate.levels <- lapply(x, levels)
    } else {
      ## Recode characters only
      x[character.idx] <- lapply(x[character.idx], factor)
    }
  }

  ## If gwa mode, add snp variable names
  if (gwa.mode) {
    all.independent.variable.names <- c(independent.variable.names, snp.names)
  } else {
    all.independent.variable.names <- independent.variable.names
  }

  ## Error if no covariates
  if (length(all.independent.variable.names) < 1) {
    stop("Error: No covariates found.")
  }

  ## Number of trees
  if (!is.numeric(num.trees) || num.trees < 1) {
    stop("Error: Invalid value for num.trees.")
  }

  ## mtry as a function: evaluate it on the number of predictors and validate
  ## that it yields a single integer in [1, nv].
  if (is.function(mtry)) {
    nv <- length(all.independent.variable.names)

    if (length(formals(mtry)) > 1){
      stop("Error: Given mtry function requires single argument (the number of independent variables in the model).")
    }

    # Evaluate function
    mtry <- try(mtry(nv), silent = TRUE)

    if (inherits(mtry, "try-error")) {
      message("The mtry function produced the error: ", mtry)
      stop("Error: mtry function evaluation resulted in an error.")
    }

    ## Check for a single numeric
    if (!is.numeric(mtry) || length(mtry) != 1) {
      stop("Error: Given mtry function should return a single integer or numeric.")
    } else {
      mtry <- as.integer(mtry)
    }

    ## Check for limits
    if (mtry < 1 || mtry > nv) {
      stop("Error: Given mtry function should evaluate to a value not less than 1 and not greater than the number of independent variables ( = ", nv, " )")
    }
  }

  ## mtry = 0 is the sentinel for "use the C++ default" (sqrt of #variables).
  if (is.null(mtry)) {
    mtry <- 0
  } else if (!is.numeric(mtry) || mtry < 0) {
    stop("Error: Invalid value for mtry")
  }

  ## Seed: draw a random seed if none given, so the C++ RNG is always seeded.
  if (is.null(seed)) {
    seed <- runif(1 , 0, .Machine$integer.max)
  }

  ## Keep inbag
  if (!is.logical(keep.inbag)) {
    stop("Error: Invalid value for keep.inbag")
  }

  ## Num threads
  ## Default 0 -> detect from system in C++.
  if (is.null(num.threads)) {
    num.threads = 0
  } else if (!is.numeric(num.threads) || num.threads < 0) {
    stop("Error: Invalid value for num.threads")
  }

  ## Minumum node size (0 = use the treetype-specific default in C++).
  if (is.null(min.node.size)) {
    min.node.size <- 0
  } else if (!is.numeric(min.node.size) || min.node.size < 0) {
    stop("Error: Invalid value for min.node.size")
  }

  ## Tree depth (0 = unlimited).
  if (is.null(max.depth)) {
    max.depth <- 0
  } else if (!is.numeric(max.depth) || max.depth < 0) {
    stop("Error: Invalid value for max.depth. Please give a positive integer.")
  }

  ## Sample fraction: scalar in (0,1], or for classification a vector of
  ## class-specific fractions in [0,1].
  if (!is.numeric(sample.fraction)) {
    stop("Error: Invalid value for sample.fraction. Please give a value in (0,1] or a vector of values in [0,1].")
  }
  if (length(sample.fraction) > 1) {
    if (!(treetype %in% c(1, 9))) {
      stop("Error: Invalid value for sample.fraction. Vector values only valid for classification forests.")
    }
    if (any(sample.fraction < 0) || any(sample.fraction > 1)) {
      stop("Error: Invalid value for sample.fraction. Please give a value in (0,1] or a vector of values in [0,1].")
    }
    if (sum(sample.fraction) <= 0) {
      stop("Error: Invalid value for sample.fraction. Sum of values must be >0.")
    }
    if (length(sample.fraction) != nlevels(y)) {
      stop("Error: Invalid value for sample.fraction. Expecting ", nlevels(y), " values, provided ", length(sample.fraction), ".")
    }
    if (!replace & any(sample.fraction * length(y) > table(y))) {
      idx <- which(sample.fraction * length(y) > table(y))[1]
      stop("Error: Not enough samples in class ", names(idx),
           "; available: ", table(y)[idx],
           ", requested: ", (sample.fraction * length(y))[idx], ".")
    }
    if (!is.null(case.weights)) {
      stop("Error: Combination of case.weights and class-wise sampling not supported.")
    }

    # Fix order (C++ needs sample.fraction in order as classes appear in data)
    sample.fraction <- sample.fraction[as.numeric(unique(y))]
  } else {
    if (sample.fraction <= 0 || sample.fraction > 1) {
      stop("Error: Invalid value for sample.fraction. Please give a value in (0,1] or a vector of values in [0,1].")
    }
  }

  # Regularization: a factor of all 1s means "off"; c(0, 0) is the C++
  # placeholder for "no regularization factors supplied".
  if (all(regularization.factor == 1)) {
    regularization.factor <- c(0, 0)
    use.regularization.factor <- FALSE
  } else {
    # Deactivation of paralellization: all trees must share the list of
    # variables already selected, so regularization forces a single thread.
    if (num.threads != 1) {
      num.threads <- 1
      warning("Paralellization deactivated (regularization used).")
    }
    use.regularization.factor <- TRUE
  }

  if (use.regularization.factor) {
    # A few checkings on the regularization coefficients
    if (max(regularization.factor) > 1) {
      stop("The regularization coefficients cannot be greater than 1.")
    }
    if (max(regularization.factor) <= 0) {
      stop("The regularization coefficients cannot be smaller than 0.")
    }
    p <- length(all.independent.variable.names)
    if (length(regularization.factor) != 1 && length(regularization.factor) != p) {
      stop("You must use 1 or p (the number of predictor variables)
           regularization coefficients.")
    }
    # A scalar factor is recycled to one coefficient per predictor.
    if (length(regularization.factor) == 1) {
      regularization.factor = rep(regularization.factor, p)
    }
  }

  ## Importance mode codes passed to C++:
  ##   0 = none, 1 = impurity, 2 = scaled permutation, 3 = raw permutation,
  ##   5 = corrected impurity, 6 = local (casewise) permutation.
  if (is.null(importance) || importance == "none") {
    importance.mode <- 0
  } else if (importance == "impurity") {
    importance.mode <- 1
  } else if (importance == "impurity_corrected" || importance == "impurity_unbiased") {
    importance.mode <- 5
  } else if (importance == "permutation") {
    if (local.importance) {
      importance.mode <- 6
    } else if (scale.permutation.importance) {
      importance.mode <- 2
    } else {
      importance.mode <- 3
    }
  } else {
    stop("Error: Unknown importance mode.")
  }

  ## Case weights: NULL for no weights or all weights equal
  ## (c(0,0) is the "no weights" placeholder for C++).
  if (is.null(case.weights) || length(unique(case.weights)) == 1) {
    case.weights <- c(0,0)
    use.case.weights <- FALSE
    if (holdout) {
      stop("Error: Case weights required to use holdout mode.")
    }
  } else {
    use.case.weights <- TRUE

    ## Sample from non-zero weights in holdout mode
    if (holdout) {
      sample.fraction <- sample.fraction * mean(case.weights > 0)
    }

    if (!replace && sum(case.weights > 0) < sample.fraction * nrow(x)) {
      stop("Error: Fewer non-zero case weights than observations to sample.")
    }
  }

  ## Manual inbag selection: a list with one integer vector per tree.
  if (is.null(inbag)) {
    inbag <- list(c(0,0))
    use.inbag <- FALSE
  } else if (is.list(inbag)) {
    use.inbag <- TRUE
    if (use.case.weights) {
      stop("Error: Combination of case.weights and inbag not supported.")
    }
    if (length(sample.fraction) > 1) {
      stop("Error: Combination of class-wise sampling and inbag not supported.")
    }
    if (length(inbag) != num.trees) {
      stop("Error: Size of inbag list not equal to number of trees.")
    }
  } else {
    stop("Error: Invalid inbag, expects list of vectors of size num.trees.")
  }

  ## Class weights: NULL for no weights (all 1)
  if (is.null(class.weights)) {
    class.weights <- rep(1, nlevels(y))
  } else {
    if (!(treetype %in% c(1, 9))) {
      stop("Error: Argument class.weights only valid for classification forests.")
    }
    if (!is.numeric(class.weights) || any(class.weights < 0)) {
      stop("Error: Invalid value for class.weights. Please give a vector of non-negative values.")
    }
    if (length(class.weights) != nlevels(y)) {
      stop("Error: Number of class weights not equal to number of classes.")
    }

    ## Reorder (C++ expects order as appearing in the data)
    class.weights <- class.weights[unique(as.numeric(y))]
  }

  ## Split select weights: NULL for no weights.
  ## A numeric vector applies to all trees; a list gives per-tree weights.
  if (is.null(split.select.weights)) {
    split.select.weights <- list(c(0,0))
    use.split.select.weights <- FALSE
  } else if (is.numeric(split.select.weights)) {
    if (length(split.select.weights) != length(all.independent.variable.names)) {
      stop("Error: Number of split select weights not equal to number of independent variables.")
    }
    split.select.weights <- list(split.select.weights)
    use.split.select.weights <- TRUE
  } else if (is.list(split.select.weights)) {
    if (length(split.select.weights) != num.trees) {
      stop("Error: Size of split select weights list not equal to number of trees.")
    }
    use.split.select.weights <- TRUE
  } else {
    stop("Error: Invalid split select weights.")
  }

  ## Always split variables: NULL for no variables
  ## (c("0", "0") is the "none" placeholder for C++).
  if (is.null(always.split.variables)) {
    always.split.variables <- c("0", "0")
    use.always.split.variables <- FALSE
  } else {
    use.always.split.variables <- TRUE
  }

  if (use.split.select.weights && use.always.split.variables) {
    stop("Error: Please use only one option of split.select.weights and always.split.variables.")
  }

  ## Splitting rule. splitrule.num codes passed to C++:
  ##   1 = default (logrank/gini/variance), 2 = C-index, 3 = C-index ignoring
  ##   ties, 4 = maxstat, 5 = extratrees, 6 = beta, 7 = hellinger.
  ## Each named rule is validated against the treetype it applies to.
  if (is.null(splitrule)) {
    if (treetype == 5) {
      splitrule <- "logrank"
    } else if (treetype == 3) {
      splitrule <- "variance"
    } else if (treetype %in% c(1, 9)) {
      splitrule <- "gini"
    }
    splitrule.num <- 1
  } else if (splitrule == "logrank") {
    if (treetype == 5) {
      splitrule.num <- 1
    } else {
      stop("Error: logrank splitrule applicable to survival data only.")
    }
  } else if (splitrule == "gini") {
    if (treetype %in% c(1, 9)) {
      splitrule.num <- 1
    } else {
      stop("Error: Gini splitrule applicable to classification data only.")
    }
  } else if (splitrule == "variance") {
    if (treetype == 3) {
      splitrule.num <- 1
    } else {
      stop("Error: variance splitrule applicable to regression data only.")
    }
  } else if (splitrule == "auc" || splitrule == "C") {
    if (treetype == 5) {
      splitrule.num <- 2
    } else {
      stop("Error: C index splitrule applicable to survival data only.")
    }
  } else if (splitrule == "auc_ignore_ties" || splitrule == "C_ignore_ties") {
    if (treetype == 5) {
      splitrule.num <- 3
    } else {
      stop("Error: C index splitrule applicable to survival data only.")
    }
  } else if (splitrule == "maxstat") {
    if (treetype == 5 || treetype == 3) {
      splitrule.num <- 4
    } else {
      stop("Error: maxstat splitrule applicable to regression or survival data only.")
    }
  } else if (splitrule == "extratrees") {
    splitrule.num <- 5
  } else if (splitrule == "beta") {
    if (treetype == 3) {
      splitrule.num <- 6
    } else {
      stop("Error: beta splitrule applicable to regression data only.")
    }

    ## Check for 0..1 outcome
    if (min(y) < 0 || max(y) > 1) {
      stop("Error: beta splitrule applicable to regression data with outcome between 0 and 1 only.")
    }
  } else if (splitrule == "hellinger") {
    if (treetype %in% c(1, 9)) {
      splitrule.num <- 7
    } else {
      stop("Error: Hellinger splitrule only implemented for binary classification.")
    }

    if ((is.factor(y) && nlevels(y) > 2) || (length(unique(y)) > 2)) {
      stop("Error: Hellinger splitrule only implemented for binary classification.")
    }
  } else {
    stop("Error: Unknown splitrule.")
  }

  ## Maxstat splitting: alpha (significance level) and minprop (minimal
  ## proportion per child) are validated even when maxstat is not used.
  if (alpha < 0 || alpha > 1) {
    stop("Error: Invalid value for alpha, please give a value between 0 and 1.")
  }
  if (minprop < 0 || minprop > 0.5) {
    stop("Error: Invalid value for minprop, please give a value between 0 and 0.5.")
  }
  if (splitrule == "maxstat" & use.regularization.factor) {
    stop("Error: Regularization cannot be used with 'maxstat' splitrule.")
  }

  ## Extra trees
  if (!is.numeric(num.random.splits) || num.random.splits < 1) {
    stop("Error: Invalid value for num.random.splits, please give a positive integer.")
  }
  if (splitrule.num == 5 && save.memory && respect.unordered.factors == "partition") {
    stop("Error: save.memory option not possible in extraTrees mode with unordered predictors.")
  }
  if (num.random.splits > 1 && splitrule.num != 5) {
    warning("Argument 'num.random.splits' ignored if splitrule is not 'extratrees'.")
  }

  ## Unordered factors: in 'partition' mode, collect the unordered factor
  ## columns for 2-partition splitting in C++; level count is limited by the
  ## mantissa width because partitions are encoded as bit masks in a double.
  if (respect.unordered.factors == "partition") {
    ordered.idx <- sapply(x, is.ordered)
    factor.idx <- sapply(x, is.factor)
    unordered.factor.variables <- independent.variable.names[factor.idx & !ordered.idx]

    if (length(unordered.factor.variables) > 0) {
      use.unordered.factor.variables <- TRUE

      ## Check level count
      num.levels <- sapply(x[, factor.idx & !ordered.idx, drop = FALSE], nlevels)
      max.level.count <- .Machine$double.digits
      if (max(num.levels) > max.level.count) {
        stop(paste("Too many levels in unordered categorical variable ", unordered.factor.variables[which.max(num.levels)],
                   ". Only ", max.level.count, " levels allowed on this system. Consider using the 'order' option.", sep = ""))
      }
    } else {
      unordered.factor.variables <- c("0", "0")
      use.unordered.factor.variables <- FALSE
    }
  } else if (respect.unordered.factors == "ignore" || respect.unordered.factors == "order") {
    ## Ordering for "order" is handled above
    unordered.factor.variables <- c("0", "0")
    use.unordered.factor.variables <- FALSE
  } else {
    stop("Error: Invalid value for respect.unordered.factors, please use 'order', 'partition' or 'ignore'.")
  }

  ## Unordered maxstat splitting not possible
  if (use.unordered.factor.variables && !is.null(splitrule)) {
    if (splitrule == "maxstat") {
      stop("Error: Unordered factor splitting not implemented for 'maxstat' splitting rule.")
    } else if (splitrule %in% c("C", "auc", "C_ignore_ties", "auc_ignore_ties")) {
      stop("Error: Unordered factor splitting not implemented for 'C' splitting rule.")
    } else if (splitrule == "beta") {
      stop("Error: Unordered factor splitting not implemented for 'beta' splitting rule.")
    }
  }

  ## Warning for experimental 'order' splitting
  if (respect.unordered.factors == "order") {
    if (treetype == 3 && splitrule == "maxstat") {
      warning("Warning: The 'order' mode for unordered factor handling with the 'maxstat' splitrule is experimental.")
    }
    if (gwa.mode & ((treetype %in% c(1,9) & nlevels(y) > 2) | treetype == 5)) {
      stop("Error: Ordering of SNPs currently only implemented for regression and binary outcomes.")
    }
  }

  ## Prediction mode always false. Use predict.ranger() method.
  prediction.mode <- FALSE
  predict.all <- FALSE
  prediction.type <- 1

  ## No loaded forest object
  loaded.forest <- list()

  ## Use sparse matrix: either hand the dgCMatrix to C++ and pass a dummy
  ## dense matrix, or the other way around.
  if (inherits(x, "dgCMatrix")) {
    sparse.x <- x
    x <- matrix(c(0, 0))
    use.sparse.data <- TRUE
  } else {
    sparse.x <- Matrix(matrix(c(0, 0)))
    use.sparse.data <- FALSE
    if (is.data.frame(x)) {
      x <- data.matrix(x)
    }
  }

  ## Response as numeric matrix: survival keeps both columns (time, status),
  ## factors are converted to their numeric codes.
  if (treetype == 5) {
    y.mat <- as.matrix(y)
  } else {
    y.mat <- as.matrix(as.numeric(y))
  }

  if (respect.unordered.factors == "order"){
    order.snps <- TRUE
  } else {
    order.snps <- FALSE
  }

  ## No competing risks check
  if (treetype == 5) {
    if (!all(y.mat[, 2] %in% 0:1)) {
      stop("Error: Competing risks not supported yet. Use status=1 for events and status=0 for censoring.")
    }
  }

  ## Call Ranger (the compiled C++ core; argument order is fixed by the
  ## Rcpp export and must not be changed here).
  result <- rangerCpp(treetype, x, y.mat, independent.variable.names, mtry,
                      num.trees, verbose, seed, num.threads, write.forest, importance.mode,
                      min.node.size, split.select.weights, use.split.select.weights,
                      always.split.variables, use.always.split.variables,
                      prediction.mode, loaded.forest, snp.data,
                      replace, probability, unordered.factor.variables, use.unordered.factor.variables,
                      save.memory, splitrule.num, case.weights, use.case.weights, class.weights,
                      predict.all, keep.inbag, sample.fraction, alpha, minprop, holdout, prediction.type,
                      num.random.splits, sparse.x, use.sparse.data, order.snps, oob.error, max.depth,
                      inbag, use.inbag,
                      regularization.factor, use.regularization.factor, regularization.usedepth)

  if (length(result) == 0) {
    stop("User interrupt or internal error.")
  }

  ## Prepare results: attach variable names to importance vectors/matrices.
  if (importance.mode != 0) {
    names(result$variable.importance) <- all.independent.variable.names

    if (importance.mode == 6) {
      # process casewise vimp: C++ returns a flat vector; reshape to
      # (observations x variables).
      result$variable.importance.local <-
        matrix(
          result$variable.importance.local,
          byrow = FALSE,
          ncol = length(all.independent.variable.names),
          dimnames = list(
            rownames(data),
            all.independent.variable.names
          )
        )
    }
  }

  ## Set predictions: post-process per treetype when OOB error was computed.
  if (treetype == 1 && oob.error) {
    ## Classification: map integer codes back to factor levels and build
    ## the OOB confusion matrix.
    if (is.factor(y)) {
      result$predictions <- integer.to.factor(result$predictions,
                                              levels(y))
    }
    result$confusion.matrix <- table(y, result$predictions,
                                     dnn = c("true", "predicted"), useNA = "ifany")
  } else if (treetype == 5 && oob.error) {
    ## Survival: predictions are cumulative hazard functions; derive
    ## survival as exp(-CHF).
    if (is.list(result$predictions)) {
      result$predictions <- do.call(rbind, result$predictions)
    }
    if (is.vector(result$predictions)) {
      result$predictions <- matrix(result$predictions, nrow = 1)
    }
    result$chf <- result$predictions
    result$predictions <- NULL
    result$survival <- exp(-result$chf)
  } else if (treetype == 9 && oob.error) {
    ## Probability: one column of class probabilities per observed class.
    if (is.list(result$predictions)) {
      result$predictions <- do.call(rbind, result$predictions)
    }
    if (is.vector(result$predictions)) {
      result$predictions <- matrix(result$predictions, nrow = 1)
    }

    ## Set colnames and sort by levels
    colnames(result$predictions) <- unique(y)
    if (is.factor(y)) {
      result$predictions <- result$predictions[, levels(droplevels(y)), drop = FALSE]
    }
  }

  ## Splitrule
  result$splitrule <- splitrule
  if (splitrule == "extratrees") {
    result$num.random.splits <- num.random.splits
  }

  ## Set treetype (human-readable label on the result object).
  if (treetype == 1) {
    result$treetype <- "Classification"
  } else if (treetype == 3) {
    result$treetype <- "Regression"
  } else if (treetype == 5) {
    result$treetype <- "Survival"
  } else if (treetype == 9) {
    result$treetype <- "Probability estimation"
  }
  if (treetype == 3) {
    ## R^2 from OOB prediction error relative to response variance.
    result$r.squared <- 1 - result$prediction.error / var(y)
  }
  result$call <- sys.call()
  result$importance.mode <- importance
  if (use.sparse.data) {
    result$num.samples <- nrow(sparse.x)
  } else {
    result$num.samples <- nrow(x)
  }
  result$replace <- replace

  ## Write forest object (everything predict.ranger needs).
  if (write.forest) {
    if (is.factor(y)) {
      result$forest$levels <- levels(y)
    }
    result$forest$independent.variable.names <- independent.variable.names
    result$forest$treetype <- result$treetype
    class(result$forest) <- "ranger.forest"

    ## In 'ordered' mode, save covariate levels
    if (respect.unordered.factors == "order" && ncol(x) > 0) {
      result$forest$covariate.levels <- covariate.levels
    }
  }

  class(result) <- "ranger"

  ## Prepare quantile prediction: store one randomly selected training
  ## response per terminal node and tree (Meinshausen 2006).
  if (quantreg) {
    terminal.nodes <- predict(result, x, type = "terminalNodes")$predictions + 1
    n <- result$num.samples
    result$random.node.values <- matrix(nrow = max(terminal.nodes), ncol = num.trees)

    ## Select one random obs per node and tree
    for (tree in 1:num.trees){
      idx <- sample(1:n, n)
      result$random.node.values[terminal.nodes[idx, tree], tree] <- y[idx]
    }

    ## Prepare out-of-bag quantile regression
    if(!is.null(result$inbag.counts)) {
      inbag.counts <- simplify2array(result$inbag.counts)
      random.node.values.oob <- 0 * terminal.nodes
      random.node.values.oob[inbag.counts > 0] <- NA

      ## For each tree and observation select one random obs in the same node (not the same obs)
      for (tree in 1:num.trees){
        is.oob <- inbag.counts[, tree] == 0
        num.oob <- sum(is.oob)

        if (num.oob != 0) {
          oob.obs <- which(is.oob)
          oob.nodes <- terminal.nodes[oob.obs, tree]
          for (j in 1:num.oob) {
            idx <- terminal.nodes[, tree] == oob.nodes[j]
            idx[oob.obs[j]] <- FALSE
            ## save.sample is a package-internal sampling helper --
            ## presumably a safe variant of sample() for length-1 input;
            ## TODO confirm against the package utilities.
            random.node.values.oob[oob.obs[j], tree] <- save.sample(y[idx], size = 1)
          }
        }
      }

      ## Check num.trees: need enough OOB occurrences per observation.
      minoob <- min(rowSums(inbag.counts == 0))
      if (minoob < 10) {
        stop("Error: Too few trees for out-of-bag quantile regression.")
      }

      ## Use the same number of values for all obs, select randomly
      result$random.node.values.oob <- t(apply(random.node.values.oob, 1, function(x) {
        sample(x[!is.na(x)], minoob)
      }))
    }
  }

  return(result)
}
|
/R/ranger.R
|
no_license
|
dplecko/ranger
|
R
| false
| false
| 46,601
|
r
|
# -------------------------------------------------------------------------------
# This file is part of Ranger.
#
# Ranger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ranger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ranger. If not, see <http://www.gnu.org/licenses/>.
#
# Written by:
#
# Marvin N. Wright
# Institut fuer Medizinische Biometrie und Statistik
# Universitaet zu Luebeck
# Ratzeburger Allee 160
# 23562 Luebeck
# Germany
#
# http://www.imbs-luebeck.de
# -------------------------------------------------------------------------------
##' Ranger is a fast implementation of random forests (Breiman 2001) or recursive partitioning, particularly suited for high dimensional data.
##' Classification, regression, and survival forests are supported.
##' Classification and regression forests are implemented as in the original Random Forest (Breiman 2001), survival forests as in Random Survival Forests (Ishwaran et al. 2008).
##' Includes implementations of extremely randomized trees (Geurts et al. 2006) and quantile regression forests (Meinshausen 2006).
##'
##' The tree type is determined by the type of the dependent variable.
##' For factors classification trees are grown, for numeric values regression trees and for survival objects survival trees.
##' The Gini index is used as default splitting rule for classification.
##' For regression, the estimated response variances or maximally selected rank statistics (Wright et al. 2016) can be used.
##' For Survival the log-rank test, a C-index based splitting rule (Schmid et al. 2015) and maximally selected rank statistics (Wright et al. 2016) are available.
##' For all tree types, forests of extremely randomized trees (Geurts et al. 2006) can be grown.
##'
##' With the \code{probability} option and factor dependent variable a probability forest is grown.
##' Here, the node impurity is used for splitting, as in classification forests.
##' Predictions are class probabilities for each sample.
##' In contrast to other implementations, each tree returns a probability estimate and these estimates are averaged for the forest probability estimate.
##' For details see Malley et al. (2012).
##'
##' Note that for classification and regression nodes with size smaller than \code{min.node.size} can occur, as in original Random Forests.
##' For survival all nodes contain at least \code{min.node.size} samples.
##' Variables selected with \code{always.split.variables} are tried additionally to the mtry variables randomly selected.
##' In \code{split.select.weights}, weights do not need to sum up to 1, they will be normalized later.
##' The weights are assigned to the variables in the order they appear in the formula or in the data if no formula is used.
##' Names of the \code{split.select.weights} vector are ignored.
##' The usage of \code{split.select.weights} can increase the computation times for large forests.
##'
##' Unordered factor covariates can be handled in 3 different ways by using \code{respect.unordered.factors}:
##' For 'ignore' all factors are regarded ordered, for 'partition' all possible 2-partitions are considered for splitting.
##' For 'order' and 2-class classification the factor levels are ordered by their proportion falling in the second class, for regression by their mean response, as described in Hastie et al. (2009), chapter 9.2.4.
##' For multiclass classification the factor levels are ordered by the first principal component of the weighted covariance matrix of the contingency table (Coppersmith et al. 1999), for survival by the median survival (or the largest available quantile if the median is not available).
##' The use of 'order' is recommended, as it is computationally fast and can handle an unlimited number of factor levels.
##' Note that the factors are only reordered once and not again in each split.
##'
##' The 'impurity_corrected' importance measure is unbiased in terms of the number of categories and category frequencies and is almost as fast as the standard impurity importance.
##' It is a modified version of the method by Sandri & Zuccolotto (2008), which is faster and more memory efficient.
##' See Nembrini et al. (2018) for details.
##' This importance measure can be combined with the methods to estimate p-values in \code{\link{importance_pvalues}}.
##'
##' Regularization works by penalizing new variables by multiplying the splitting criterion by a factor, see Deng & Runger (2012) for details.
##' If \code{regularization.usedepth=TRUE}, \eqn{f^d} is used, where \emph{f} is the regularization factor and \emph{d} the depth of the node.
##' If regularization is used, multithreading is deactivated because all trees need access to the list of variables that are already included in the model.
##'
##' For a large number of variables and data frames as input data the formula interface can be slow or impossible to use.
##' Alternatively \code{dependent.variable.name} (and \code{status.variable.name} for survival) or \code{x} and \code{y} can be used.
##' Use \code{x} and \code{y} with a matrix for \code{x} to avoid conversions and save memory.
##' Consider setting \code{save.memory = TRUE} if you encounter memory problems for very large datasets, but be aware that this option slows down the tree growing.
##'
##' For GWAS data consider combining \code{ranger} with the \code{GenABEL} package.
##' See the Examples section below for a demonstration using \code{Plink} data.
##' All SNPs in the \code{GenABEL} object will be used for splitting.
##' To use only the SNPs without sex or other covariates from the phenotype file, use \code{0} on the right hand side of the formula.
##' Note that missing values are treated as an extra category while splitting.
##'
##' See \url{https://github.com/imbs-hl/ranger} for the development version.
##'
##' With recent R versions, multithreading on Windows platforms should just work.
##' If you compile yourself, the new RTools toolchain is required.
##'
##' @title Ranger
##' @param formula Object of class \code{formula} or \code{character} describing the model to fit. Interaction terms supported only for numerical variables.
##' @param data Training data of class \code{data.frame}, \code{matrix}, \code{dgCMatrix} (Matrix) or \code{gwaa.data} (GenABEL).
##' @param num.trees Number of trees.
##' @param mtry Number of variables to possibly split at in each node. Default is the (rounded down) square root of the number of variables. Alternatively, a single argument function returning an integer, given the number of independent variables.
##' @param importance Variable importance mode, one of 'none', 'impurity', 'impurity_corrected', 'permutation'. The 'impurity' measure is the Gini index for classification, the variance of the responses for regression and the sum of test statistics (see \code{splitrule}) for survival.
##' @param write.forest Save \code{ranger.forest} object, required for prediction. Set to \code{FALSE} to reduce memory usage if no prediction intended.
##' @param probability Grow a probability forest as in Malley et al. (2012).
##' @param min.node.size Minimal node size. Default 1 for classification, 5 for regression, 3 for survival, and 10 for probability.
##' @param max.depth Maximal tree depth. A value of NULL or 0 (the default) corresponds to unlimited depth, 1 to tree stumps (1 split per tree).
##' @param replace Sample with replacement.
##' @param sample.fraction Fraction of observations to sample. Default is 1 for sampling with replacement and 0.632 for sampling without replacement. For classification, this can be a vector of class-specific values.
##' @param case.weights Weights for sampling of training observations. Observations with larger weights will be selected with higher probability in the bootstrap (or subsampled) samples for the trees.
##' @param class.weights Weights for the outcome classes (in order of the factor levels) in the splitting rule (cost sensitive learning). Classification and probability prediction only. For classification the weights are also applied in the majority vote in terminal nodes.
##' @param splitrule Splitting rule. For classification and probability estimation "gini", "extratrees" or "hellinger" with default "gini". For regression "variance", "extratrees", "maxstat" or "beta" with default "variance". For survival "logrank", "extratrees", "C" or "maxstat" with default "logrank".
##' @param num.random.splits For "extratrees" splitrule: Number of random splits to consider for each candidate splitting variable.
##' @param alpha For "maxstat" splitrule: Significance threshold to allow splitting.
##' @param minprop For "maxstat" splitrule: Lower quantile of covariate distribution to be considered for splitting.
##' @param split.select.weights Numeric vector with weights between 0 and 1, representing the probability to select variables for splitting. Alternatively, a list of size num.trees, containing split select weight vectors for each tree can be used.
##' @param always.split.variables Character vector with variable names to be always selected in addition to the \code{mtry} variables tried for splitting.
##' @param respect.unordered.factors Handling of unordered factor covariates. One of 'ignore', 'order' and 'partition'. For the "extratrees" splitrule the default is "partition"; for all other splitrules it is 'ignore'. Alternatively TRUE (='order') or FALSE (='ignore') can be used. See below for details.
##' @param scale.permutation.importance Scale permutation importance by standard error as in (Breiman 2001). Only applicable if permutation variable importance mode selected.
##' @param regularization.factor Regularization factor (gain penalization), either a vector of length p or one value for all variables.
##' @param regularization.usedepth Consider the depth in regularization.
##' @param local.importance Calculate and return local importance values as in (Breiman 2001). Only applicable if \code{importance} is set to 'permutation'.
##' @param keep.inbag Save how often observations are in-bag in each tree.
##' @param inbag Manually set observations per tree. List of size num.trees, containing inbag counts for each observation. Can be used for stratified sampling.
##' @param holdout Hold-out mode. Hold-out all samples with case weight 0 and use these for variable importance and prediction error.
##' @param quantreg Prepare quantile prediction as in quantile regression forests (Meinshausen 2006). Regression only. Set \code{keep.inbag = TRUE} to prepare out-of-bag quantile prediction.
##' @param oob.error Compute OOB prediction error. Set to \code{FALSE} to save computation time, e.g. for large survival forests.
##' @param num.threads Number of threads. Default is number of CPUs available.
##' @param save.memory Use memory saving (but slower) splitting mode. No effect for survival and GWAS data. Warning: This option slows down the tree growing, use only if you encounter memory problems.
##' @param verbose Show computation status and estimated runtime.
##' @param seed Random seed. Default is \code{NULL}, which generates the seed from \code{R}. Set to \code{0} to ignore the \code{R} seed.
##' @param dependent.variable.name Name of dependent variable, needed if no formula given. For survival forests this is the time variable.
##' @param status.variable.name Name of status variable, only applicable to survival data and needed if no formula given. Use 1 for event and 0 for censoring.
##' @param classification Set to \code{TRUE} to grow a classification forest. Only needed if the data is a matrix or the response numeric.
##' @param x Predictor data (independent variables), alternative interface to data with formula or dependent.variable.name.
##' @param y Response vector (dependent variable), alternative interface to data with formula or dependent.variable.name. For survival use a \code{Surv()} object or a matrix with time and status.
##' @return Object of class \code{ranger} with elements
##' \item{\code{forest}}{Saved forest (If write.forest set to TRUE). Note that the variable IDs in the \code{split.varIDs} object do not necessarily represent the column number in R.}
##' \item{\code{predictions}}{Predicted classes/values, based on out of bag samples (classification and regression only).}
##' \item{\code{variable.importance}}{Variable importance for each independent variable.}
##' \item{\code{variable.importance.local}}{Variable importance for each independent variable and each sample, if \code{local.importance} is set to TRUE and \code{importance} is set to 'permutation'.}
##' \item{\code{prediction.error}}{Overall out of bag prediction error. For classification this is the fraction of misclassified samples, for probability estimation the Brier score, for regression the mean squared error and for survival one minus Harrell's C-index.}
##' \item{\code{r.squared}}{R squared. Also called explained variance or coefficient of determination (regression only). Computed on out of bag data.}
##' \item{\code{confusion.matrix}}{Contingency table for classes and predictions based on out of bag samples (classification only).}
##' \item{\code{unique.death.times}}{Unique death times (survival only).}
##' \item{\code{chf}}{Estimated cumulative hazard function for each sample (survival only).}
##' \item{\code{survival}}{Estimated survival function for each sample (survival only).}
##' \item{\code{call}}{Function call.}
##' \item{\code{num.trees}}{Number of trees.}
##' \item{\code{num.independent.variables}}{Number of independent variables.}
##' \item{\code{mtry}}{Value of mtry used.}
##' \item{\code{min.node.size}}{Value of minimal node size used.}
##' \item{\code{treetype}}{Type of forest/tree. classification, regression or survival.}
##' \item{\code{importance.mode}}{Importance mode used.}
##' \item{\code{num.samples}}{Number of samples.}
##' \item{\code{inbag.counts}}{Number of times the observations are in-bag in the trees.}
##' @examples
##' ## Classification forest with default settings
##' ranger(Species ~ ., data = iris)
##'
##' ## Prediction
##' train.idx <- sample(nrow(iris), 2/3 * nrow(iris))
##' iris.train <- iris[train.idx, ]
##' iris.test <- iris[-train.idx, ]
##' rg.iris <- ranger(Species ~ ., data = iris.train)
##' pred.iris <- predict(rg.iris, data = iris.test)
##' table(iris.test$Species, pred.iris$predictions)
##'
##' ## Quantile regression forest
##' rf <- ranger(mpg ~ ., mtcars[1:26, ], quantreg = TRUE)
##' pred <- predict(rf, mtcars[27:32, ], type = "quantiles")
##' pred$predictions
##'
##' ## Variable importance
##' rg.iris <- ranger(Species ~ ., data = iris, importance = "impurity")
##' rg.iris$variable.importance
##'
##' ## Survival forest
##' require(survival)
##' rg.veteran <- ranger(Surv(time, status) ~ ., data = veteran)
##' plot(rg.veteran$unique.death.times, rg.veteran$survival[1,])
##'
##' ## Alternative interfaces (same results)
##' ranger(dependent.variable.name = "Species", data = iris)
##' ranger(y = iris[, 5], x = iris[, -5])
##'
##' \dontrun{
##' ## Use GenABEL interface to read Plink data into R and grow a classification forest
##' ## The ped and map files are not included
##' library(GenABEL)
##' convert.snp.ped("data.ped", "data.map", "data.raw")
##' dat.gwaa <- load.gwaa.data("data.pheno", "data.raw")
##' phdata(dat.gwaa)$trait <- factor(phdata(dat.gwaa)$trait)
##' ranger(trait ~ ., data = dat.gwaa)
##' }
##'
##' @author Marvin N. Wright
##' @references
##' \itemize{
##' \item Wright, M. N. & Ziegler, A. (2017). ranger: A fast implementation of random forests for high dimensional data in C++ and R. J Stat Softw 77:1-17. \url{https://doi.org/10.18637/jss.v077.i01}.
##' \item Schmid, M., Wright, M. N. & Ziegler, A. (2016). On the use of Harrell's C for clinical risk prediction via random survival forests. Expert Syst Appl 63:450-459. \url{https://doi.org/10.1016/j.eswa.2016.07.018}.
##' \item Wright, M. N., Dankowski, T. & Ziegler, A. (2017). Unbiased split variable selection for random survival forests using maximally selected rank statistics. Stat Med 36:1272-1284. \url{https://doi.org/10.1002/sim.7212}.
##' \item Nembrini, S., Koenig, I. R. & Wright, M. N. (2018). The revival of the Gini Importance? Bioinformatics. \url{https://doi.org/10.1093/bioinformatics/bty373}.
##' \item Breiman, L. (2001). Random forests. Mach Learn, 45:5-32. \url{https://doi.org/10.1023/A:1010933404324}.
##' \item Ishwaran, H., Kogalur, U. B., Blackstone, E. H., & Lauer, M. S. (2008). Random survival forests. Ann Appl Stat 2:841-860. \url{https://doi.org/10.1214/08-AOAS169}.
##' \item Malley, J. D., Kruppa, J., Dasgupta, A., Malley, K. G., & Ziegler, A. (2012). Probability machines: consistent probability estimation using nonparametric learning machines. Methods Inf Med 51:74-81. \url{https://doi.org/10.3414/ME00-01-0052}.
##' \item Hastie, T., Tibshirani, R., Friedman, J. (2009). The Elements of Statistical Learning. Springer, New York. 2nd edition.
##' \item Geurts, P., Ernst, D., Wehenkel, L. (2006). Extremely randomized trees. Mach Learn 63:3-42. \url{https://doi.org/10.1007/s10994-006-6226-1}.
##' \item Meinshausen (2006). Quantile Regression Forests. J Mach Learn Res 7:983-999. \url{http://www.jmlr.org/papers/v7/meinshausen06a.html}.
##' \item Sandri, M. & Zuccolotto, P. (2008). A bias correction algorithm for the Gini variable importance measure in classification trees. J Comput Graph Stat, 17:611-628. \url{https://doi.org/10.1198/106186008X344522}.
##' \item Coppersmith D., Hong S. J., Hosking J. R. (1999). Partitioning nominal attributes in decision trees. Data Min Knowl Discov 3:197-217. \url{https://doi.org/10.1023/A:1009869804967}.
##' \item Deng & Runger (2012). Feature selection via regularized trees. The 2012 International Joint Conference on Neural Networks (IJCNN), Brisbane, Australia. \url{https://doi.org/10.1109/IJCNN.2012.6252640}.
##' }
##' @seealso \code{\link{predict.ranger}}
##' @useDynLib ranger, .registration = TRUE
##' @importFrom Rcpp evalCpp
##' @import stats
##' @import utils
##' @importFrom Matrix Matrix
##' @export
ranger <- function(formula = NULL, data = NULL, num.trees = 500, mtry = NULL,
importance = "none", write.forest = TRUE, probability = FALSE,
min.node.size = NULL, max.depth = NULL, replace = TRUE,
sample.fraction = ifelse(replace, 1, 0.632),
case.weights = NULL, class.weights = NULL, splitrule = NULL,
num.random.splits = 1, alpha = 0.5, minprop = 0.1,
split.select.weights = NULL, always.split.variables = NULL,
respect.unordered.factors = NULL,
scale.permutation.importance = FALSE,
local.importance = FALSE,
regularization.factor = 1, regularization.usedepth = FALSE,
keep.inbag = FALSE, inbag = NULL, holdout = FALSE,
quantreg = FALSE, oob.error = TRUE,
num.threads = NULL, save.memory = FALSE,
verbose = TRUE, seed = NULL,
dependent.variable.name = NULL, status.variable.name = NULL,
classification = NULL, x = NULL, y = NULL) {
## By default not in GWAS mode
snp.data <- as.matrix(0)
gwa.mode <- FALSE
if (is.null(data)) {
## x/y interface
if (is.null(x) | is.null(y)) {
stop("Error: Either data or x and y is required.")
}
} else {
## GenABEL GWA data
if (inherits(data, "gwaa.data" )) {
snp.names <- data@gtdata@snpnames
snp.data <- data@gtdata@gtps@.Data
data <- data@phdata
if ("id" %in% names(data)) {
data$"id" <- NULL
}
gwa.mode <- TRUE
save.memory <- FALSE
}
## Formula interface. Use whole data frame if no formula provided and depvarname given
if (is.null(formula)) {
if (is.null(dependent.variable.name)) {
if (is.null(y) | is.null(x)) {
stop("Error: Please give formula, dependent variable name or x/y.")
}
} else {
if (is.null(status.variable.name)) {
y <- data[, dependent.variable.name, drop = TRUE]
x <- data[, !(colnames(data) %in% dependent.variable.name), drop = FALSE]
} else {
y <- survival::Surv(data[, dependent.variable.name], data[, status.variable.name])
x <- data[, !(colnames(data) %in% c(dependent.variable.name, status.variable.name)), drop = FALSE]
}
}
} else {
formula <- formula(formula)
if (!inherits(formula, "formula")) {
stop("Error: Invalid formula.")
}
data.selected <- parse.formula(formula, data, env = parent.frame())
y <- data.selected[, 1]
x <- data.selected[, -1, drop = FALSE]
}
}
## Sparse matrix data
if (inherits(x, "Matrix")) {
if (!inherits(x, "dgCMatrix")) {
stop("Error: Currently only sparse data of class 'dgCMatrix' supported.")
}
if (!is.null(formula)) {
stop("Error: Sparse matrices only supported with alternative interface. Use dependent.variable.name or x/y instead of formula.")
}
}
## Check missing values
if (any(is.na(x))) {
offending_columns <- colnames(x)[colSums(is.na(x)) > 0]
stop("Missing data in columns: ",
paste0(offending_columns, collapse = ", "), ".", call. = FALSE)
}
if (any(is.na(y))) {
stop("Missing data in dependent variable.", call. = FALSE)
}
## Check response levels
if (is.factor(y)) {
if (nlevels(y) != nlevels(droplevels(y))) {
dropped_levels <- setdiff(levels(y), levels(droplevels(y)))
warning("Dropped unused factor level(s) in dependent variable: ",
paste0(dropped_levels, collapse = ", "), ".", call. = FALSE)
}
}
## Treetype
if (is.factor(y) || is.logical(y)) {
if (probability) {
treetype <- 9
} else {
treetype <- 1
}
} else if (is.numeric(y) && (is.null(ncol(y)) || ncol(y) == 1)) {
if (!is.null(classification) && classification && !probability) {
treetype <- 1
} else if (probability) {
treetype <- 9
} else {
treetype <- 3
}
} else if (inherits(y, "Surv") || is.data.frame(y) || is.matrix(y)) {
treetype <- 5
} else {
stop("Error: Unsupported type of dependent variable.")
}
## Quantile prediction only for regression
if (quantreg && treetype != 3) {
stop("Error: Quantile prediction implemented only for regression outcomes.")
}
independent.variable.names <- colnames(x)
## respect.unordered.factors
if (is.null(respect.unordered.factors)) {
if (!is.null(splitrule) && splitrule == "extratrees") {
respect.unordered.factors <- "partition"
} else {
respect.unordered.factors <- "ignore"
}
}
## Old version of respect.unordered.factors
if (respect.unordered.factors == TRUE) {
respect.unordered.factors <- "order"
} else if (respect.unordered.factors == FALSE) {
respect.unordered.factors <- "ignore"
}
## Recode characters as factors and recode factors if 'order' mode
if (!is.matrix(x) && !inherits(x, "Matrix") && ncol(x) > 0) {
character.idx <- sapply(x, is.character)
if (respect.unordered.factors == "order") {
## Recode characters and unordered factors
ordered.idx <- sapply(x, is.ordered)
factor.idx <- sapply(x, is.factor)
recode.idx <- character.idx | (factor.idx & !ordered.idx)
if (any(recode.idx) & (importance == "impurity_corrected" || importance == "impurity_unbiased")) {
warning("Corrected impurity importance may not be unbiased for re-ordered factor levels. Consider setting respect.unordered.factors to 'ignore' or 'partition' or manually compute corrected importance.")
}
## Numeric response
if (is.factor(y)) {
num.y <- as.numeric(y)
} else {
num.y <- y
}
## Recode each column
x[recode.idx] <- lapply(x[recode.idx], function(xx) {
if (!is.factor(xx)) {
xx <- as.factor(xx)
}
if (length(levels(xx)) == 1) {
## Don't order if only one level
levels.ordered <- levels(xx)
} else if (inherits(y, "Surv")) {
## Use median survival if available or largest quantile available in all strata if median not available
levels.ordered <- largest.quantile(y ~ xx)
## Get all levels not in node
levels.missing <- setdiff(levels(xx), levels.ordered)
levels.ordered <- c(levels.missing, levels.ordered)
} else if (is.factor(y) & nlevels(y) > 2) {
levels.ordered <- pca.order(y = y, x = xx)
} else {
## Order factor levels by mean response
means <- sapply(levels(xx), function(y) {
mean(num.y[xx == y])
})
levels.ordered <- as.character(levels(xx)[order(means)])
}
## Return reordered factor
factor(xx, levels = levels.ordered, ordered = TRUE, exclude = NULL)
})
## Save levels
covariate.levels <- lapply(x, levels)
} else {
## Recode characters only
x[character.idx] <- lapply(x[character.idx], factor)
}
}
## If gwa mode, add snp variable names
if (gwa.mode) {
all.independent.variable.names <- c(independent.variable.names, snp.names)
} else {
all.independent.variable.names <- independent.variable.names
}
## Error if no covariates
if (length(all.independent.variable.names) < 1) {
stop("Error: No covariates found.")
}
## Number of trees
if (!is.numeric(num.trees) || num.trees < 1) {
stop("Error: Invalid value for num.trees.")
}
## mtry as a function
if (is.function(mtry)) {
nv <- length(all.independent.variable.names)
if (length(formals(mtry)) > 1){
stop("Error: Given mtry function requires single argument (the number of independent variables in the model).")
}
# Evaluate function
mtry <- try(mtry(nv), silent = TRUE)
if (inherits(mtry, "try-error")) {
message("The mtry function produced the error: ", mtry)
stop("Error: mtry function evaluation resulted in an error.")
}
## Check for a single numeric
if (!is.numeric(mtry) || length(mtry) != 1) {
stop("Error: Given mtry function should return a single integer or numeric.")
} else {
mtry <- as.integer(mtry)
}
## Check for limits
if (mtry < 1 || mtry > nv) {
stop("Error: Given mtry function should evaluate to a value not less than 1 and not greater than the number of independent variables ( = ", nv, " )")
}
}
if (is.null(mtry)) {
mtry <- 0
} else if (!is.numeric(mtry) || mtry < 0) {
stop("Error: Invalid value for mtry")
}
## Seed
if (is.null(seed)) {
seed <- runif(1 , 0, .Machine$integer.max)
}
## Keep inbag
if (!is.logical(keep.inbag)) {
stop("Error: Invalid value for keep.inbag")
}
## Num threads
## Default 0 -> detect from system in C++.
if (is.null(num.threads)) {
num.threads = 0
} else if (!is.numeric(num.threads) || num.threads < 0) {
stop("Error: Invalid value for num.threads")
}
## Minumum node size
if (is.null(min.node.size)) {
min.node.size <- 0
} else if (!is.numeric(min.node.size) || min.node.size < 0) {
stop("Error: Invalid value for min.node.size")
}
## Tree depth
if (is.null(max.depth)) {
max.depth <- 0
} else if (!is.numeric(max.depth) || max.depth < 0) {
stop("Error: Invalid value for max.depth. Please give a positive integer.")
}
## Sample fraction
if (!is.numeric(sample.fraction)) {
stop("Error: Invalid value for sample.fraction. Please give a value in (0,1] or a vector of values in [0,1].")
}
if (length(sample.fraction) > 1) {
if (!(treetype %in% c(1, 9))) {
stop("Error: Invalid value for sample.fraction. Vector values only valid for classification forests.")
}
if (any(sample.fraction < 0) || any(sample.fraction > 1)) {
stop("Error: Invalid value for sample.fraction. Please give a value in (0,1] or a vector of values in [0,1].")
}
if (sum(sample.fraction) <= 0) {
stop("Error: Invalid value for sample.fraction. Sum of values must be >0.")
}
if (length(sample.fraction) != nlevels(y)) {
stop("Error: Invalid value for sample.fraction. Expecting ", nlevels(y), " values, provided ", length(sample.fraction), ".")
}
if (!replace & any(sample.fraction * length(y) > table(y))) {
idx <- which(sample.fraction * length(y) > table(y))[1]
stop("Error: Not enough samples in class ", names(idx),
"; available: ", table(y)[idx],
", requested: ", (sample.fraction * length(y))[idx], ".")
}
if (!is.null(case.weights)) {
stop("Error: Combination of case.weights and class-wise sampling not supported.")
}
# Fix order (C++ needs sample.fraction in order as classes appear in data)
sample.fraction <- sample.fraction[as.numeric(unique(y))]
} else {
if (sample.fraction <= 0 || sample.fraction > 1) {
stop("Error: Invalid value for sample.fraction. Please give a value in (0,1] or a vector of values in [0,1].")
}
}
# Regularization
if (all(regularization.factor == 1)) {
regularization.factor <- c(0, 0)
use.regularization.factor <- FALSE
} else {
# Deactivation of paralellization
if (num.threads != 1) {
num.threads <- 1
warning("Paralellization deactivated (regularization used).")
}
use.regularization.factor <- TRUE
}
if (use.regularization.factor) {
# A few checkings on the regularization coefficients
if (max(regularization.factor) > 1) {
stop("The regularization coefficients cannot be greater than 1.")
}
if (max(regularization.factor) <= 0) {
stop("The regularization coefficients cannot be smaller than 0.")
}
p <- length(all.independent.variable.names)
if (length(regularization.factor) != 1 && length(regularization.factor) != p) {
stop("You must use 1 or p (the number of predictor variables)
regularization coefficients.")
}
if (length(regularization.factor) == 1) {
regularization.factor = rep(regularization.factor, p)
}
}
## Importance mode
if (is.null(importance) || importance == "none") {
importance.mode <- 0
} else if (importance == "impurity") {
importance.mode <- 1
} else if (importance == "impurity_corrected" || importance == "impurity_unbiased") {
importance.mode <- 5
} else if (importance == "permutation") {
if (local.importance) {
importance.mode <- 6
} else if (scale.permutation.importance) {
importance.mode <- 2
} else {
importance.mode <- 3
}
} else {
stop("Error: Unknown importance mode.")
}
## Case weights: NULL for no weights or all weights equal
if (is.null(case.weights) || length(unique(case.weights)) == 1) {
case.weights <- c(0,0)
use.case.weights <- FALSE
if (holdout) {
stop("Error: Case weights required to use holdout mode.")
}
} else {
use.case.weights <- TRUE
## Sample from non-zero weights in holdout mode
if (holdout) {
sample.fraction <- sample.fraction * mean(case.weights > 0)
}
if (!replace && sum(case.weights > 0) < sample.fraction * nrow(x)) {
stop("Error: Fewer non-zero case weights than observations to sample.")
}
}
## Manual inbag selection
if (is.null(inbag)) {
inbag <- list(c(0,0))
use.inbag <- FALSE
} else if (is.list(inbag)) {
use.inbag <- TRUE
if (use.case.weights) {
stop("Error: Combination of case.weights and inbag not supported.")
}
if (length(sample.fraction) > 1) {
stop("Error: Combination of class-wise sampling and inbag not supported.")
}
if (length(inbag) != num.trees) {
stop("Error: Size of inbag list not equal to number of trees.")
}
} else {
stop("Error: Invalid inbag, expects list of vectors of size num.trees.")
}
## Class weights: NULL for no weights (all 1)
if (is.null(class.weights)) {
class.weights <- rep(1, nlevels(y))
} else {
if (!(treetype %in% c(1, 9))) {
stop("Error: Argument class.weights only valid for classification forests.")
}
if (!is.numeric(class.weights) || any(class.weights < 0)) {
stop("Error: Invalid value for class.weights. Please give a vector of non-negative values.")
}
if (length(class.weights) != nlevels(y)) {
stop("Error: Number of class weights not equal to number of classes.")
}
## Reorder (C++ expects order as appearing in the data)
class.weights <- class.weights[unique(as.numeric(y))]
}
## Split select weights: NULL for no weights
if (is.null(split.select.weights)) {
split.select.weights <- list(c(0,0))
use.split.select.weights <- FALSE
} else if (is.numeric(split.select.weights)) {
if (length(split.select.weights) != length(all.independent.variable.names)) {
stop("Error: Number of split select weights not equal to number of independent variables.")
}
split.select.weights <- list(split.select.weights)
use.split.select.weights <- TRUE
} else if (is.list(split.select.weights)) {
if (length(split.select.weights) != num.trees) {
stop("Error: Size of split select weights list not equal to number of trees.")
}
use.split.select.weights <- TRUE
} else {
stop("Error: Invalid split select weights.")
}
## Always split variables: NULL for no variables
if (is.null(always.split.variables)) {
always.split.variables <- c("0", "0")
use.always.split.variables <- FALSE
} else {
use.always.split.variables <- TRUE
}
if (use.split.select.weights && use.always.split.variables) {
stop("Error: Please use only one option of split.select.weights and always.split.variables.")
}
## Splitting rule
if (is.null(splitrule)) {
if (treetype == 5) {
splitrule <- "logrank"
} else if (treetype == 3) {
splitrule <- "variance"
} else if (treetype %in% c(1, 9)) {
splitrule <- "gini"
}
splitrule.num <- 1
} else if (splitrule == "logrank") {
if (treetype == 5) {
splitrule.num <- 1
} else {
stop("Error: logrank splitrule applicable to survival data only.")
}
} else if (splitrule == "gini") {
if (treetype %in% c(1, 9)) {
splitrule.num <- 1
} else {
stop("Error: Gini splitrule applicable to classification data only.")
}
} else if (splitrule == "variance") {
if (treetype == 3) {
splitrule.num <- 1
} else {
stop("Error: variance splitrule applicable to regression data only.")
}
} else if (splitrule == "auc" || splitrule == "C") {
if (treetype == 5) {
splitrule.num <- 2
} else {
stop("Error: C index splitrule applicable to survival data only.")
}
} else if (splitrule == "auc_ignore_ties" || splitrule == "C_ignore_ties") {
if (treetype == 5) {
splitrule.num <- 3
} else {
stop("Error: C index splitrule applicable to survival data only.")
}
} else if (splitrule == "maxstat") {
if (treetype == 5 || treetype == 3) {
splitrule.num <- 4
} else {
stop("Error: maxstat splitrule applicable to regression or survival data only.")
}
} else if (splitrule == "extratrees") {
splitrule.num <- 5
} else if (splitrule == "beta") {
if (treetype == 3) {
splitrule.num <- 6
} else {
stop("Error: beta splitrule applicable to regression data only.")
}
## Check for 0..1 outcome
if (min(y) < 0 || max(y) > 1) {
stop("Error: beta splitrule applicable to regression data with outcome between 0 and 1 only.")
}
} else if (splitrule == "hellinger") {
if (treetype %in% c(1, 9)) {
splitrule.num <- 7
} else {
stop("Error: Hellinger splitrule only implemented for binary classification.")
}
if ((is.factor(y) && nlevels(y) > 2) || (length(unique(y)) > 2)) {
stop("Error: Hellinger splitrule only implemented for binary classification.")
}
} else {
stop("Error: Unknown splitrule.")
}
## Maxstat splitting
if (alpha < 0 || alpha > 1) {
stop("Error: Invalid value for alpha, please give a value between 0 and 1.")
}
if (minprop < 0 || minprop > 0.5) {
stop("Error: Invalid value for minprop, please give a value between 0 and 0.5.")
}
if (splitrule == "maxstat" & use.regularization.factor) {
stop("Error: Regularization cannot be used with 'maxstat' splitrule.")
}
## Extra trees
if (!is.numeric(num.random.splits) || num.random.splits < 1) {
stop("Error: Invalid value for num.random.splits, please give a positive integer.")
}
if (splitrule.num == 5 && save.memory && respect.unordered.factors == "partition") {
stop("Error: save.memory option not possible in extraTrees mode with unordered predictors.")
}
if (num.random.splits > 1 && splitrule.num != 5) {
warning("Argument 'num.random.splits' ignored if splitrule is not 'extratrees'.")
}
## Unordered factors
if (respect.unordered.factors == "partition") {
ordered.idx <- sapply(x, is.ordered)
factor.idx <- sapply(x, is.factor)
unordered.factor.variables <- independent.variable.names[factor.idx & !ordered.idx]
if (length(unordered.factor.variables) > 0) {
use.unordered.factor.variables <- TRUE
## Check level count
num.levels <- sapply(x[, factor.idx & !ordered.idx, drop = FALSE], nlevels)
max.level.count <- .Machine$double.digits
if (max(num.levels) > max.level.count) {
stop(paste("Too many levels in unordered categorical variable ", unordered.factor.variables[which.max(num.levels)],
". Only ", max.level.count, " levels allowed on this system. Consider using the 'order' option.", sep = ""))
}
} else {
unordered.factor.variables <- c("0", "0")
use.unordered.factor.variables <- FALSE
}
} else if (respect.unordered.factors == "ignore" || respect.unordered.factors == "order") {
## Ordering for "order" is handled above
unordered.factor.variables <- c("0", "0")
use.unordered.factor.variables <- FALSE
} else {
stop("Error: Invalid value for respect.unordered.factors, please use 'order', 'partition' or 'ignore'.")
}
## Unordered maxstat splitting not possible
if (use.unordered.factor.variables && !is.null(splitrule)) {
if (splitrule == "maxstat") {
stop("Error: Unordered factor splitting not implemented for 'maxstat' splitting rule.")
} else if (splitrule %in% c("C", "auc", "C_ignore_ties", "auc_ignore_ties")) {
stop("Error: Unordered factor splitting not implemented for 'C' splitting rule.")
} else if (splitrule == "beta") {
stop("Error: Unordered factor splitting not implemented for 'beta' splitting rule.")
}
}
## Warning for experimental 'order' splitting
if (respect.unordered.factors == "order") {
if (treetype == 3 && splitrule == "maxstat") {
warning("Warning: The 'order' mode for unordered factor handling with the 'maxstat' splitrule is experimental.")
}
if (gwa.mode & ((treetype %in% c(1,9) & nlevels(y) > 2) | treetype == 5)) {
stop("Error: Ordering of SNPs currently only implemented for regression and binary outcomes.")
}
}
## Prediction mode always false. Use predict.ranger() method.
prediction.mode <- FALSE
predict.all <- FALSE
prediction.type <- 1
## No loaded forest object
loaded.forest <- list()
## Use sparse matrix
if (inherits(x, "dgCMatrix")) {
sparse.x <- x
x <- matrix(c(0, 0))
use.sparse.data <- TRUE
} else {
sparse.x <- Matrix(matrix(c(0, 0)))
use.sparse.data <- FALSE
if (is.data.frame(x)) {
x <- data.matrix(x)
}
}
if (treetype == 5) {
y.mat <- as.matrix(y)
} else {
y.mat <- as.matrix(as.numeric(y))
}
if (respect.unordered.factors == "order"){
order.snps <- TRUE
} else {
order.snps <- FALSE
}
## No competing risks check
if (treetype == 5) {
if (!all(y.mat[, 2] %in% 0:1)) {
stop("Error: Competing risks not supported yet. Use status=1 for events and status=0 for censoring.")
}
}
## Call Ranger
result <- rangerCpp(treetype, x, y.mat, independent.variable.names, mtry,
num.trees, verbose, seed, num.threads, write.forest, importance.mode,
min.node.size, split.select.weights, use.split.select.weights,
always.split.variables, use.always.split.variables,
prediction.mode, loaded.forest, snp.data,
replace, probability, unordered.factor.variables, use.unordered.factor.variables,
save.memory, splitrule.num, case.weights, use.case.weights, class.weights,
predict.all, keep.inbag, sample.fraction, alpha, minprop, holdout, prediction.type,
num.random.splits, sparse.x, use.sparse.data, order.snps, oob.error, max.depth,
inbag, use.inbag,
regularization.factor, use.regularization.factor, regularization.usedepth)
if (length(result) == 0) {
stop("User interrupt or internal error.")
}
## Prepare results
if (importance.mode != 0) {
names(result$variable.importance) <- all.independent.variable.names
if (importance.mode == 6) {
# process casewise vimp
result$variable.importance.local <-
matrix(
result$variable.importance.local,
byrow = FALSE,
ncol = length(all.independent.variable.names),
dimnames = list(
rownames(data),
all.independent.variable.names
)
)
}
}
## Set predictions
if (treetype == 1 && oob.error) {
if (is.factor(y)) {
result$predictions <- integer.to.factor(result$predictions,
levels(y))
}
result$confusion.matrix <- table(y, result$predictions,
dnn = c("true", "predicted"), useNA = "ifany")
} else if (treetype == 5 && oob.error) {
if (is.list(result$predictions)) {
result$predictions <- do.call(rbind, result$predictions)
}
if (is.vector(result$predictions)) {
result$predictions <- matrix(result$predictions, nrow = 1)
}
result$chf <- result$predictions
result$predictions <- NULL
result$survival <- exp(-result$chf)
} else if (treetype == 9 && oob.error) {
if (is.list(result$predictions)) {
result$predictions <- do.call(rbind, result$predictions)
}
if (is.vector(result$predictions)) {
result$predictions <- matrix(result$predictions, nrow = 1)
}
## Set colnames and sort by levels
colnames(result$predictions) <- unique(y)
if (is.factor(y)) {
result$predictions <- result$predictions[, levels(droplevels(y)), drop = FALSE]
}
}
## Splitrule
result$splitrule <- splitrule
if (splitrule == "extratrees") {
result$num.random.splits <- num.random.splits
}
## Set treetype
if (treetype == 1) {
result$treetype <- "Classification"
} else if (treetype == 3) {
result$treetype <- "Regression"
} else if (treetype == 5) {
result$treetype <- "Survival"
} else if (treetype == 9) {
result$treetype <- "Probability estimation"
}
if (treetype == 3) {
result$r.squared <- 1 - result$prediction.error / var(y)
}
result$call <- sys.call()
result$importance.mode <- importance
if (use.sparse.data) {
result$num.samples <- nrow(sparse.x)
} else {
result$num.samples <- nrow(x)
}
result$replace <- replace
## Write forest object
if (write.forest) {
if (is.factor(y)) {
result$forest$levels <- levels(y)
}
result$forest$independent.variable.names <- independent.variable.names
result$forest$treetype <- result$treetype
class(result$forest) <- "ranger.forest"
## In 'ordered' mode, save covariate levels
if (respect.unordered.factors == "order" && ncol(x) > 0) {
result$forest$covariate.levels <- covariate.levels
}
}
class(result) <- "ranger"
## Prepare quantile prediction
if (quantreg) {
terminal.nodes <- predict(result, x, type = "terminalNodes")$predictions + 1
n <- result$num.samples
result$random.node.values <- matrix(nrow = max(terminal.nodes), ncol = num.trees)
## Select one random obs per node and tree
for (tree in 1:num.trees){
idx <- sample(1:n, n)
result$random.node.values[terminal.nodes[idx, tree], tree] <- y[idx]
}
## Prepare out-of-bag quantile regression
if(!is.null(result$inbag.counts)) {
inbag.counts <- simplify2array(result$inbag.counts)
random.node.values.oob <- 0 * terminal.nodes
random.node.values.oob[inbag.counts > 0] <- NA
## For each tree and observation select one random obs in the same node (not the same obs)
for (tree in 1:num.trees){
is.oob <- inbag.counts[, tree] == 0
num.oob <- sum(is.oob)
if (num.oob != 0) {
oob.obs <- which(is.oob)
oob.nodes <- terminal.nodes[oob.obs, tree]
for (j in 1:num.oob) {
idx <- terminal.nodes[, tree] == oob.nodes[j]
idx[oob.obs[j]] <- FALSE
random.node.values.oob[oob.obs[j], tree] <- save.sample(y[idx], size = 1)
}
}
}
## Check num.trees
minoob <- min(rowSums(inbag.counts == 0))
if (minoob < 10) {
stop("Error: Too few trees for out-of-bag quantile regression.")
}
## Use the same number of values for all obs, select randomly
result$random.node.values.oob <- t(apply(random.node.values.oob, 1, function(x) {
sample(x[!is.na(x)], minoob)
}))
}
}
return(result)
}
|
## Clean environment #################################################################
## NOTE(review): rm(list=ls()) wipes the interactive workspace; tolerable in a
## stand-alone analysis script, but never do this in sourced/shared code.
rm(list=ls())
## Libraries
library(sp)
library(lattice)
library(RColorBrewer)
library(ggplot2)
library(reshape2)
library(mice)
library(Amelia)
library(VIM)
library(gstat)
library(raster)
## Load helper functions -- assumed to define fillMissingDates(),
## convertDataToSp(), convertStringToPOSIXct() and utm11 (used below); confirm
## against util/my_helper.R.
source("util/my_helper.R")
## Read data #########################################################################
## Daily EPA observations for California, 2016: ozone plus meteorological covariates
ozone <- readRDS("data/epa/epa_daily/2016/california_ozone.RDS")
temp <- readRDS("data/epa/epa_daily/2016/california_temperature.RDS")
wind <- readRDS("data/epa/epa_daily/2016/california_wind.RDS")
rh <- readRDS("data/epa/epa_daily/2016/california_rh.RDS")
ozone.sites <- readRDS("data/epa/epa_daily/2016/california_ozone_sites.RDS")
temp.sites <- readRDS("data/epa/epa_daily/2016/california_temperature_sites.RDS")
wind.sites <- readRDS("data/epa/epa_daily/2016/california_wind_sites.RDS")
rh.sites <- readRDS("data/epa/epa_daily/2016/california_rh_sites.RDS")
## Read site metadata and keep California only
sites <- readRDS("data/epa/sites/aqs_sites.RDS")
sites <- sites[sites$State.Name=="California",]
## Intersection between stations: how many covariate stations overlap the ozone network
sum(temp.sites$Station.Code %in% ozone.sites$Station.Code)
sum(wind.sites$Station.Code %in% ozone.sites$Station.Code)
sum(rh.sites$Station.Code %in% ozone.sites$Station.Code) # Only 20!
## Merge with the other variables ########################################################
ozone <- fillMissingDates(ozone)
## Implicit filling of missing dates in the other data frames:
## left-joining onto the completed ozone grid introduces NA rows where a
## covariate has no observation for a station/date.
## FIX(review): spell out TRUE instead of T (T/F are ordinary, reassignable bindings)
cali <- merge(ozone, temp, all.x=TRUE)
cali <- merge(cali, wind, all.x=TRUE)
cali <- merge(cali, rh, all.x=TRUE)
#View(cali)
summary(cali)
## Daily sequence covering the whole observation period (reused by loops below)
days <- seq(from=min(cali$Date), to=max(cali$Date), by='days')
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Convert to Spatial data ####
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## convertDataToSp() comes from util/my_helper.R; presumably it attaches
## projected coordinates and returns a Spatial*DataFrame -- TODO confirm.
cali.sp <- convertDataToSp(cali)
# cali.var <- variogram(cali~x+y,
#             na.omit(cali.sp[cali.sp$Date==sample(cali.sp$Date,1),"Ozone"])) # Random date
# plot(cali.var)
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Analyze Missing Values ####
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Check missing rows
#md.pattern(cali)
## VIM::aggr -- missingness proportions/patterns; the first two columns
## (identifier columns) are dropped from the plot
aggr(cali[,-(1:2)], gap=3, cex.axis=0.8)
## Station-by-date heatmaps: visualize where each variable is missing
levelplot(Ozone~Date*Station.Code,cali,
          cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
          scales=list(y=list(cex=.7)))
levelplot(Temperature~Date*Station.Code,cali,
          cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
          scales=list(y=list(cex=.7)))
levelplot(Wind.speed~Date*Station.Code,cali,
          cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
          scales=list(y=list(cex=.7)))
levelplot(RH~Date*Station.Code,cali,
          cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
          scales=list(y=list(cex=.7)))
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Impute Temperature ####
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Count NAs per station; na.action=na.pass keeps rows whose Temperature is NA
## so they are counted instead of dropped by aggregate()
tempNasByStation <- aggregate(Temperature~Station.Code,cali,function(x){sum(is.na(x))}, na.action=na.pass)
## Keep only stations that actually have missing values
tempNasByStation <- tempNasByStation[tempNasByStation$Temperature>0,]
#View(tempNasByStation)
## Visualize Missing and Available Stations
## Stations with more than 100 missing days are treated as "unavailable"
naStations <- tempNasByStation[tempNasByStation$Temperature>100,1]
temp.sites.na <-
  merge(data.frame(Station.Code=naStations),
        sites[,c("Station.Code","Latitude","Longitude","Datum","Elevation",
                 "Location.Setting")], all.x=T)
## Combined site table labelled by availability, for mapping
temp.sites.all <- rbind(cbind(temp.sites,Type="Available"),
                        cbind(temp.sites.na,Type="Missing"))
mapUSA <- readRDS("data/maps/usa/USA_adm1.rds")
mapCA <- mapUSA[mapUSA$NAME_1=="California",]
## Map of available vs missing temperature stations over California
ggplot(mapCA) +
  geom_polygon(aes(x = long, y = lat, group = group), fill = "white", colour = "black") +
  geom_point(data = temp.sites.all,
             aes(x = Longitude, y = Latitude, fill=Type),
             alpha = .75, shape=21, size=2) +
  geom_text(data = ozone.sites[ozone.sites$Station.Code=="089-0009",],
            aes(x = Longitude, y = Latitude, label=Station.Code)) +
  labs(x = "Longitude", y = "Latitude", fill = "Type") +
  coord_quickmap() +
  theme(legend.justification = c("right", "top"), legend.position = c(.95, .95),
        legend.box.background = element_rect(), legend.box.margin = margin(6, 6, 6, 6))
print(levelplot(Temperature~Date*Station.Code,temp,
                cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
                scales=list(y=list(cex=.7))))
## Observations:
## Since the amount of missing data in the "available" dataset is small,
## a simple imputation method (Inverse distance weighted) is used
## IDW (Inverse Distance Weighted) Interpolation
# Compute the sample variogram; note that the f.1 trend model is one of the
# parameters passed to variogram(). This tells the function to create the
# variogram on the de-trended data.
temp.sp <- convertDataToSp(temp)
# temp.var <- variogram(Temperature~x+y,temp.sp)
# plot(temp.var)
## Assessing IDW interpolation on different locations
## Test 1: two contrasting test stations
ss <- c(
  "083-1021", ## Optimally located between several close stations
  "089-3003" ## Most isolated station with 356 values for testing
)
s<- ss[2]
## Results (OS-R2, OS-BIAS)
## 1: 0.9386069, 0.789375
## 2: 0.9219895, 17.93853 ## HUGE BIAS!!!
## NOTES:
## IDW Interpolation can be used for data imputation
## only when there are close stations.
## Otherwise, the imputation is substantially biased.
## Dead code kept for reference: the if(F) guard means none of this runs.
## NOTE(review): F is a reassignable binding; FALSE would be safer.
if(F){
  ## Map highlighting the chosen test station s in red
  ggplot(mapCA) +
    geom_polygon(aes(x = long, y = lat, group = group), fill = "white", colour = "black") +
    geom_point(data = temp.sites.all,
               aes(x = Longitude, y = Latitude, fill=Type),
               alpha = .75, shape=21, size=2) +
    geom_point(data = temp.sites.all[temp.sites.all$Station.Code==s,],
               aes(x = Longitude, y = Latitude), alpha = .75, shape=21, size=3, fill="red") +
    labs(x = "Longitude", y = "Latitude", fill = "Type") +
    coord_quickmap() +
    theme(legend.justification = c("right", "top"), legend.position = c(.95, .95),
          legend.box.background = element_rect(), legend.box.margin = margin(6, 6, 6, 6))
  dim(temp.sp[temp.sp$Station.Code!=s,])
  dim(temp.sp[temp.sp$Station.Code==s,])
  ## IDW: score daily IDW predictions against the held-out series of station s
  test <- temp[temp$Station.Code==s,]
  test <- fillMissingDates(test)
  test <- convertDataToSp(test)
  #head(coordinates(test))
  test$Temperature2 <- NA
  ## Inject NAs for May at station s so the imputation can be evaluated
  temp.sp$Temperature[temp.sp$Station.Code==s
                      & temp.sp$Date>=convertStringToPOSIXct("2016-05-01")
                      & temp.sp$Date<=convertStringToPOSIXct("2016-05-31")] <- NA # Inject NAs
  #View(temp.sp[temp.sp$Station.Code==s,])
  for ( i in 1:length(days) ){
    print(days[i])
    a <- idw(Temperature ~ 1,
             temp.sp[temp.sp$Date==days[i] & !is.na(temp.sp$Temperature),], # IN-SAMPLE
             # temp.sp[temp.sp$Station.Code!=s & temp.sp$Date==days[i],], # OUT_OF_SAMPLE
             newdata=test[test$Date==days[i],],
             idp=2.0, debug.level=0)
    test$Temperature2[test$Date==days[i]] <- a$var1.pred
  }
  plot(Temperature~Date,test, type="l")
  lines(Temperature2~Date,test, col=2, lty="dashed")
  ## Out-of-sample R^2 and bias of the IDW predictions
  cor(test$Temperature,test$Temperature2,use="pairwise.complete.obs")^2
  mean(test$Temperature2-test$Temperature,na.rm=T)
  ## Kriging
  ## NOTE(review): 'newStation' is never defined anywhere in this script;
  ## this loop would error on the predict() call if the if(F) guard were removed.
  for ( i in 1:length(days) ){
    print(days[i])
    sampleVar <- variogram(Temperature~1,temp.sp[temp.sp$Date==days[i],], cutoff=500)
    #plot(sampleVar)
    modelVar <- fit.variogram(sampleVar, vgm("Exp"))
    #plot(sampleVar, modelVar)
    k <- gstat(formula=Temperature~1, loc=temp.sp[temp.sp$Date==days[i],], model=modelVar)
    kp <- predict(k,
                  newStation[newStation$Station.Code=="083-1021" & newStation$Date==days[i],])
    #spplot(kp)
    newStation$Temperature2[newStation$Date==days[i]] <- kp$var1.pred
  }
  lines(Temperature2~Date,newStation, lty="dashed", col=3)
}
## Build the held-out test series for station s (complete daily grid, spatial)
test <- temp[temp$Station.Code==s,]
test <- fillMissingDates(test)
test <- convertDataToSp(test)
#head(coordinates(test))
test$Temperature2 <- NA
## Dead code (if(F)): out-of-sample IDW evaluation, kept for reference
if(F){
  ## IDW (Inverse Distance Weighted) Interpolation
  for( i in 1:length(days) ){
    #print(days[i])
    ## Predict station s from all OTHER stations on this day (out-of-sample)
    a <- idw(Temperature ~ 1,
             temp.sp[temp.sp$Station.Code!=s & temp.sp$Date==days[i],],
             newdata=test[test$Date==days[i],],
             idp=2.0, debug.level=0)
    ## The Station.Code==s condition is redundant here: 'test' only contains station s
    test$Temperature2[test$Station.Code==s & test$Date==days[i]] <- a$var1.pred
  }
  plot(Temperature~Date,test,type="l")
  lines(Temperature2~Date,test,col=2,lty="dashed")
  ## BIAS: mean prediction error
  mean(test$Temperature2-test$Temperature, na.rm=T)
}
### Imputation stage 1: Impute stations with less than 100 NAs by station
### using IDW interpolation
tempNasByStationS1 <- tempNasByStation[tempNasByStation$Temperature>0 & tempNasByStation$Temperature<100,]
## Flag the cells that are about to be imputed (0 = observed, 1 = imputed)
cali.sp$TemperatureFlag <- 0
cali.sp$TemperatureFlag[cali.sp$Station.Code %in% tempNasByStationS1$Station.Code
                        & is.na(cali.sp$Temperature)] <- 1 # Imputation method
for(station in tempNasByStationS1$Station.Code){
  print(station)
  ## Dates flagged for imputation at this station.
  ## FIX(review): use a local name instead of clobbering the global 'days'
  ## sequence created at the top of the script.
  station.days <- cali.sp$Date[cali.sp$Station.Code==station
                               & !is.na(cali.sp$TemperatureFlag) & cali.sp$TemperatureFlag==1]
  ## FIX(review): seq_along() instead of 1:length() -- with a zero-length
  ## vector, 1:length(x) iterates over c(1, 0) and would run idw() twice
  ## on empty selections instead of skipping the loop.
  for (i in seq_along(station.days)){
    ## IDW prediction from all observed temperature stations on this day
    ## (idp = inverse-distance power)
    a <- idw(Temperature ~ 1,
             temp.sp[temp.sp$Date==station.days[i],],
             newdata=cali.sp[cali.sp$Station.Code==station & cali.sp$Date==station.days[i],],
             idp=2.0, debug.level=0)
    cali.sp$Temperature[cali.sp$Station.Code==station & cali.sp$Date==station.days[i]] <- a$var1.pred
  }
}
#View(cali.sp[,c("Station.Code","Date","Ozone","Temperature","TemperatureIDW")])
## Test: overlay every stage-1 station series and highlight imputed points in red
plot(Temperature~Date,cali.sp[cali.sp$Station.Code==cali.sp$Station.Code[1],], col=0,
     ylim=range(cali.sp$Temperature,na.rm=TRUE))
for(station in tempNasByStationS1$Station.Code[tempNasByStationS1$Temperature>0
                                               & tempNasByStationS1$Temperature<100]){
  #plot(Temperature~Date,cali.sp[cali.sp$Station.Code==station,], type="l")
  lines(Temperature~Date,cali.sp[cali.sp$Station.Code==station,], type="l")
  ## FIX(review): the original filtered on the non-existent column
  ## 'TemperatureIM' (!is.na(cali.sp$TemperatureIM==1) evaluates to logical(0)),
  ## so the imputed points were never drawn. Use the TemperatureFlag column
  ## that the stage-1 imputation sets (never NA: initialized to 0, set to 1).
  points(Temperature~Date,cali.sp[cali.sp$Station.Code==station
                                  & cali.sp$TemperatureFlag==1,], col=2, pch=18)
  #readline("Continue?")
}
## Colour series segments by whether the value was observed (flag==0) or imputed
ggplot(cali.sp@data) +
  geom_line(aes(Date,Temperature, group=Station.Code, colour=(TemperatureFlag==0)), alpha=0.5) +
  theme(legend.position="none")
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Interpolate Unavailable Temperature stations ####
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Annual IDW interpolation (test) -- dead code (if(F)), kept for reference.
## Builds an annual-mean IDW surface over California and plots station locations.
## 'utm11' is presumably a CRS object defined in util/my_helper.R -- TODO confirm.
if(F){
  annualMeanTemperature <- aggregate(Temperature~Station.Code, temp, mean)
  #View(annualMeanTemperature)
  annualMeanTemperature <- merge(annualMeanTemperature, sites[,c("Station.Code","x","y")], all.x=T)
  coordinates(annualMeanTemperature)<-~x+y
  proj4string(annualMeanTemperature) <- utm11
  mapCa.utm <- spTransform(mapCA, utm11)
  mapCa.utm.raster <- raster(mapCa.utm, res=10);
  ## temp.sites.all is converted to a spatial object and reprojected in place
  coordinates(temp.sites.all) <-~Longitude+Latitude
  proj4string(temp.sites.all) <- "+proj=longlat"
  temp.sites.all <- spTransform(temp.sites.all, utm11)
  gs <- gstat(formula=Temperature~1, locations=annualMeanTemperature)
  out <- interpolate(mapCa.utm.raster, gs)
  out <- mask(out, mapCa.utm)
  plot(out)
  points(temp.sites.all, pch=3, cex=0.5, col=temp.sites.all$Type)
}
### Imputation stage 2: Interpolate unavailable stations
### using IDW interpolation
tempNasByStationS2 <- tempNasByStation[tempNasByStation$Temperature>100,]
#cali.sp.new <- cali.sp[cali.sp$Station.Code %in% tempNasByStationS2$Station.Code,]
days <- sort(unique(cali.sp$Date))
## For every day, predict the "unavailable" stations from all observed
## temperature stations via inverse-distance weighting.
## FIX(review): seq_along() instead of 1:length() -- robust to an empty 'days'.
for (i in seq_along(days)){
  #print(days[i])
  a <- idw(Temperature ~ 1,
           temp.sp[temp.sp$Date==days[i],],
           newdata=cali.sp[cali.sp$Station.Code %in% tempNasByStationS2$Station.Code & cali.sp$Date==days[i],],
           idp=2.0, debug.level=0)
  cali.sp$Temperature[cali.sp$Station.Code %in% tempNasByStationS2$Station.Code & cali.sp$Date==days[i]] <- a$var1.pred
}
## Test: interpolated series, full heatmap, and all-station overlay
ggplot(cali.sp[cali.sp$Station.Code %in% tempNasByStationS2$Station.Code,]@data) +
  geom_line(aes(Date,Temperature, colour=Station.Code), alpha=0.5) +
  theme(legend.position="none")
levelplot(Temperature~Date*Station.Code,cali.sp@data,
          cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
          scales=list(y=list(cex=.7)))
ggplot(cali.sp@data) +
  geom_line(aes(Date,Temperature, colour=Station.Code), alpha=0.5) +
  theme(legend.position="none")
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Impute Wind speed ####
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Count NAs per station for wind speed (same recipe as for temperature)
varNasByStation <- aggregate(Wind.speed~Station.Code,cali,function(x){sum(is.na(x))}, na.action=na.pass)
varNasByStation <- varNasByStation[varNasByStation$Wind.speed>0,]
#View(varNasByStation)
## Spatial version of the wind data (coordinates merged from the site table;
## 'utm11' is presumably a CRS defined in util/my_helper.R -- TODO confirm)
wind.sp <- wind
wind.sp <- merge(wind.sp, sites[,c("Station.Code","x","y")], all.x=T)
coordinates(wind.sp) <- ~x+y
proj4string(wind.sp) <- utm11
## Visualize Missing and Available Stations
## Stations with more than 100 missing days are treated as "unavailable"
naStations <- varNasByStation[varNasByStation$Wind.speed>100,1]
var.sites.na <-
  merge(data.frame(Station.Code=naStations),
        sites[,c("Station.Code","Latitude","Longitude","Datum","Elevation",
                 "Location.Setting")], all.x=T)
var.sites.all <- rbind(cbind(wind.sites,Type="Available"),
                       cbind(var.sites.na,Type="Missing"))
## Map of available vs missing wind stations over California
ggplot(mapCA) +
  geom_polygon(aes(x = long, y = lat, group = group), fill = "white", colour = "black") +
  geom_point(data = var.sites.all,
             aes(x = Longitude, y = Latitude, fill=Type),
             alpha = .75, shape=21, size=2) +
  labs(x = "Longitude", y = "Latitude", fill = "Type") +
  coord_quickmap() +
  theme(legend.justification = c("right", "top"), legend.position = c(.95, .95),
        legend.box.background = element_rect(), legend.box.margin = margin(6, 6, 6, 6))
# Test: 001-2005
print(levelplot(Wind.speed~Date*Station.Code,wind,
                cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
                scales=list(y=list(cex=.7))))
print(levelplot(Wind.speed~Date*Station.Code,cali,
                cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
                scales=list(y=list(cex=.7))))
## Test: out-of-sample evaluation of wind-speed interpolation at one station
theStation <- "089-3003"
## Dead code (if(F)): IDW and kriging experiments on log wind speed, kept for
## reference. The author's own notes record that both approaches failed.
if(F){
  ## Map highlighting the test station in red
  ggplot(mapCA) +
    geom_polygon(aes(x = long, y = lat, group = group), fill = "white", colour = "black") +
    geom_point(data = var.sites.all,
               aes(x = Longitude, y = Latitude, fill=Type),
               alpha = .75, shape=21, size=2) +
    geom_point(data = var.sites.all[var.sites.all$Station.Code==theStation,],
               aes(x = Longitude, y = Latitude),
               alpha = .75, shape=21, size=4, fill="red") +
    labs(x = "Longitude", y = "Latitude", fill = "Type") +
    coord_quickmap() +
    theme(legend.justification = c("right", "top"), legend.position = c(.95, .95),
          legend.box.background = element_rect(), legend.box.margin = margin(6, 6, 6, 6))
  ## IDW on log wind speed (overwrites the global 'days' sequence)
  days <- seq(from=min(cali.sp$Date), to=max(cali.sp$Date),by='days' )
  cali.sp$Wind.speed.log2 <- NA
  cali.sp$Wind.speed.log <- log(cali.sp$Wind.speed)
  wind.sp$Wind.speed.log <- log(wind.sp$Wind.speed)
  for ( i in 1:length(days) ){
    print(days[i])
    ## Out-of-sample: the test station is excluded from the interpolation input
    a <- idw(Wind.speed.log ~ 1,
             wind.sp[wind.sp$Date==days[i] & wind.sp$Station.Code!=theStation
                     & !is.na(wind.sp$Wind.speed),],
             newdata=cali.sp[cali.sp$Station.Code==theStation
                             & cali.sp$Date==days[i],],
             idp=2.0, debug.level=0)
    cali.sp$Wind.speed.log2[cali.sp$Station.Code==theStation
                            & cali.sp$Date==days[i]] <- a$var1.pred
  }
  plot(Wind.speed.log~Date,cali.sp[cali.sp$Station.Code==theStation,], type="l")
  lines(Wind.speed.log2~Date,cali.sp[cali.sp$Station.Code==theStation,], col=2, lty="dashed")
  ## Notes: Fail!!!
  ## Semivariogram: daily empirical variograms overlaid on one plot
  for ( i in 1:length(days) ){
    sampleVar <- variogram(Wind.speed.log~1,cali.sp[cali.sp$Date==days[i] & !is.na(cali.sp$Wind.speed),], cutoff=500)
    if(i==1) plot(gamma~dist,sampleVar,type="l")
    lines(gamma~dist,sampleVar,col=i)
  }
  sampleVar <- variogram(Wind.speed.log~1,cali.sp[!is.na(cali.sp$Wind.speed),], cutoff=500)
  plot(sampleVar)
  modelVar <- fit.variogram(sampleVar, vgm(150, "Exp", 400, nugget = 1))
  ## NOTE: singular model in variogram fit
  ## Variogram with Temperature as external drift
  sampleVar <- variogram(Wind.speed.log~Temperature,cali.sp[cali.sp$Date==days[1] & !is.na(cali.sp$Wind.speed.log),], cutoff=500)
  plot(sampleVar)
  modelVar <- fit.variogram(sampleVar, vgm(0.5, "Exp", 50, nugget = 0))
  plot(sampleVar, modelVar)
  ## Kriging
  cali.sp$Wind.speed.log3 <- NA
  for ( i in 1:length(days) ){
    print(days[i])
    sampleVar <- variogram(Wind.speed.log~Temperature,cali.sp[cali.sp$Date==days[i] & !is.na(cali.sp$Wind.speed.log),], cutoff=500)
    #plot(sampleVar);
    modelVar <- fit.variogram(sampleVar, vgm(0.5, "Exp", 50, nugget = 0))
    #plot(sampleVar, modelVar)
    ## NOTE(review): BUG -- wind.sp is subset with a logical mask computed on
    ## cali.sp (different length/order); presumably cali.sp was intended here.
    k <- gstat(formula=Wind.speed~Temperature, loc=wind.sp[cali.sp$Date==days[i] & !is.na(cali.sp$Wind.speed.log),], model=modelVar)
    kp <- predict(k, cali.sp[cali.sp$Station.Code==theStation & cali.sp$Date==days[i],])
    #spplot(kp)
    ## NOTE(review): BUG -- assigns the prediction to EVERY station on this
    ## date, not only theStation (missing Station.Code condition).
    cali.sp$Wind.speed.log3[cali.sp$Date==days[i]] <- kp$var1.pred
  }
  plot(Wind.speed.log~Date,cali.sp[cali.sp$Station.Code==theStation,], type="l")
  lines(Wind.speed.log3~Date,cali.sp[cali.sp$Station.Code==theStation,], lty="dashed", col=3)
  ## NOTE: FAIL!
}
### Imputation stage 1: Impute stations with less than 100 NAs by station
### using IDW interpolation
## NOTE(review): this whole section sits under the "Impute Wind speed" heading
## but is a verbatim copy of the Temperature stage-1 imputation above -- it
## re-runs on tempNasByStation / Temperature, not varNasByStation / Wind.speed.
## Presumably a copy-paste left unfinished; confirm intent before relying on it.
tempNasByStationS1 <- tempNasByStation[tempNasByStation$Temperature>0 & tempNasByStation$Temperature<100,]
cali.sp$TemperatureFlag <- 0
cali.sp$TemperatureFlag[cali.sp$Station.Code %in% tempNasByStationS1$Station.Code
                        & is.na(cali.sp$Temperature)] <- 1 # Imputation method
for(station in tempNasByStationS1$Station.Code){
  print(station)
  #days <- cali.sp$Date[cali.sp$Station.Code==station & is.na(cali.sp$Temperature)]
  ## NOTE(review): this overwrites the global 'days' sequence
  days <- cali.sp$Date[cali.sp$Station.Code==station
                       & !is.na(cali.sp$TemperatureFlag) & cali.sp$TemperatureFlag==1]
  #print(days)
  for ( i in 1:length(days) ){
    #print(days[i])
    a <- idw(Temperature ~ 1,
             temp.sp[temp.sp$Date==days[i],],
             newdata=cali.sp[cali.sp$Station.Code==station & cali.sp$Date==days[i],],
             idp=2.0, debug.level=0)
    cali.sp$Temperature[cali.sp$Station.Code==station & cali.sp$Date==days[i]] <- a$var1.pred
  }
}
#View(cali.sp[,c("Station.Code","Date","Ozone","Temperature","TemperatureIDW")])
## Test
plot(Temperature~Date,cali.sp[cali.sp$Station.Code==cali.sp$Station.Code[1],], col=0,
     ylim=range(cali.sp$Temperature,na.rm=T))
for(station in tempNasByStationS1$Station.Code[tempNasByStationS1$Temperature>0
                                               & tempNasByStationS1$Temperature<100]){
  #plot(Temperature~Date,cali.sp[cali.sp$Station.Code==station,], type="l")
  lines(Temperature~Date,cali.sp[cali.sp$Station.Code==station,], type="l")
  ## NOTE(review): 'TemperatureIM' does not exist (the flag column is
  ## TemperatureFlag), so this points() call selects zero rows and draws nothing.
  points(Temperature~Date,cali.sp[cali.sp$Station.Code==station
                                  & !is.na(cali.sp$TemperatureIM==1),], col=2, pch=18)
  #readline("Continue?")
}
ggplot(cali.sp@data) +
  geom_line(aes(Date,Temperature, group=Station.Code, colour=(TemperatureFlag==0)), alpha=0.5) +
  theme(legend.position="none")
|
/preprocessing/join_epa_daily_ca.R
|
permissive
|
rgualan/soton-data-science-thesis
|
R
| false
| false
| 20,035
|
r
|
## Clean environment #################################################################
## NOTE(review): this is a byte-for-byte duplicate of the script starting at the
## top of this file (dataset-concatenation artifact). See the notes there.
## rm(list=ls()) wipes the user's workspace -- acceptable only in stand-alone scripts.
rm(list=ls())
## Libraries
library(sp)
library(lattice)
library(RColorBrewer)
library(ggplot2)
library(reshape2)
library(mice)
library(Amelia)
library(VIM)
library(gstat)
library(raster)
## Load helper functions (fillMissingDates, convertDataToSp, utm11, ...)
source("util/my_helper.R")
## Read data #########################################################################
## Daily EPA observations for California, 2016
ozone <- readRDS("data/epa/epa_daily/2016/california_ozone.RDS")
temp <- readRDS("data/epa/epa_daily/2016/california_temperature.RDS")
wind <- readRDS("data/epa/epa_daily/2016/california_wind.RDS")
rh <- readRDS("data/epa/epa_daily/2016/california_rh.RDS")
ozone.sites <- readRDS("data/epa/epa_daily/2016/california_ozone_sites.RDS")
temp.sites <- readRDS("data/epa/epa_daily/2016/california_temperature_sites.RDS")
wind.sites <- readRDS("data/epa/epa_daily/2016/california_wind_sites.RDS")
rh.sites <- readRDS("data/epa/epa_daily/2016/california_rh_sites.RDS")
## Read site metadata and keep California only
sites <- readRDS("data/epa/sites/aqs_sites.RDS")
sites <- sites[sites$State.Name=="California",]
## Intersection between stations
sum(temp.sites$Station.Code %in% ozone.sites$Station.Code)
sum(wind.sites$Station.Code %in% ozone.sites$Station.Code)
sum(rh.sites$Station.Code %in% ozone.sites$Station.Code) # Only 20!
## Merge with the other variables ########################################################
ozone <- fillMissingDates(ozone)
## Implicit filling of missing dates in the other data frames
cali <- merge(ozone, temp, all.x=T)
cali <- merge(cali, wind, all.x=T)
cali <- merge(cali, rh, all.x=T)
#View(cali)
summary(cali)
## Daily sequence covering the whole observation period
days <- seq(from=min(cali$Date), to=max(cali$Date),by='days' )
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Convert to Spatial data ####
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Duplicate of the section earlier in this file (dataset-concatenation artifact).
cali.sp <- convertDataToSp(cali)
# cali.var <- variogram(cali~x+y,
#             na.omit(cali.sp[cali.sp$Date==sample(cali.sp$Date,1),"Ozone"])) # Random date
# plot(cali.var)
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Analyze Missing Values ####
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Check missing rows
#md.pattern(cali)
## Missingness pattern plot (identifier columns dropped)
aggr(cali[,-(1:2)], gap=3, cex.axis=0.8)
## Station-by-date heatmaps of each variable
levelplot(Ozone~Date*Station.Code,cali,
          cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
          scales=list(y=list(cex=.7)))
levelplot(Temperature~Date*Station.Code,cali,
          cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
          scales=list(y=list(cex=.7)))
levelplot(Wind.speed~Date*Station.Code,cali,
          cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
          scales=list(y=list(cex=.7)))
levelplot(RH~Date*Station.Code,cali,
          cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
          scales=list(y=list(cex=.7)))
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Impute Temperature ####
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## NA counts per station (na.action=na.pass keeps NA rows so they are counted)
tempNasByStation <- aggregate(Temperature~Station.Code,cali,function(x){sum(is.na(x))}, na.action=na.pass)
tempNasByStation <- tempNasByStation[tempNasByStation$Temperature>0,]
#View(tempNasByStation)
## Visualize Missing and Available Stations (>100 missing days = "unavailable")
naStations <- tempNasByStation[tempNasByStation$Temperature>100,1]
temp.sites.na <-
  merge(data.frame(Station.Code=naStations),
        sites[,c("Station.Code","Latitude","Longitude","Datum","Elevation",
                 "Location.Setting")], all.x=T)
temp.sites.all <- rbind(cbind(temp.sites,Type="Available"),
                        cbind(temp.sites.na,Type="Missing"))
mapUSA <- readRDS("data/maps/usa/USA_adm1.rds")
mapCA <- mapUSA[mapUSA$NAME_1=="California",]
ggplot(mapCA) +
  geom_polygon(aes(x = long, y = lat, group = group), fill = "white", colour = "black") +
  geom_point(data = temp.sites.all,
             aes(x = Longitude, y = Latitude, fill=Type),
             alpha = .75, shape=21, size=2) +
  geom_text(data = ozone.sites[ozone.sites$Station.Code=="089-0009",],
            aes(x = Longitude, y = Latitude, label=Station.Code)) +
  labs(x = "Longitude", y = "Latitude", fill = "Type") +
  coord_quickmap() +
  theme(legend.justification = c("right", "top"), legend.position = c(.95, .95),
        legend.box.background = element_rect(), legend.box.margin = margin(6, 6, 6, 6))
print(levelplot(Temperature~Date*Station.Code,temp,
                cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
                scales=list(y=list(cex=.7))))
## Observations:
## Since the amount of missing data in the "available" dataset is small,
## a simple imputation method (Inverse distance weighted) is used
## IDW (Inverse Distance Weighted) Interpolation
# Compute the sample variogram; note that the f.1 trend model is one of the
# parameters passed to variogram(). This tells the function to create the
# variogram on the de-trended data.
temp.sp <- convertDataToSp(temp)
# temp.var <- variogram(Temperature~x+y,temp.sp)
# plot(temp.var)
## Assessing IDW interpolation on different locations
## Test 1: two contrasting test stations
ss <- c(
  "083-1021", ## Optimally located between several close stations
  "089-3003" ## Most isolated station with 356 values for testing
)
s<- ss[2]
## Results (OS-R2, OS-BIAS)
## 1: 0.9386069, 0.789375
## 2: 0.9219895, 17.93853 ## HUGE BIAS!!!
## NOTES:
## IDW Interpolation can be used for data imputation
## only when there are close stations.
## Otherwise, the imputation is substantially biased.
## Dead code (if(F)), duplicate of the experimental block earlier in this file.
if(F){
  ## Map highlighting the chosen test station s in red
  ggplot(mapCA) +
    geom_polygon(aes(x = long, y = lat, group = group), fill = "white", colour = "black") +
    geom_point(data = temp.sites.all,
               aes(x = Longitude, y = Latitude, fill=Type),
               alpha = .75, shape=21, size=2) +
    geom_point(data = temp.sites.all[temp.sites.all$Station.Code==s,],
               aes(x = Longitude, y = Latitude), alpha = .75, shape=21, size=3, fill="red") +
    labs(x = "Longitude", y = "Latitude", fill = "Type") +
    coord_quickmap() +
    theme(legend.justification = c("right", "top"), legend.position = c(.95, .95),
          legend.box.background = element_rect(), legend.box.margin = margin(6, 6, 6, 6))
  dim(temp.sp[temp.sp$Station.Code!=s,])
  dim(temp.sp[temp.sp$Station.Code==s,])
  ## IDW: score daily IDW predictions against the held-out series of station s
  test <- temp[temp$Station.Code==s,]
  test <- fillMissingDates(test)
  test <- convertDataToSp(test)
  #head(coordinates(test))
  test$Temperature2 <- NA
  ## Inject NAs for May at station s so the imputation can be evaluated
  temp.sp$Temperature[temp.sp$Station.Code==s
                      & temp.sp$Date>=convertStringToPOSIXct("2016-05-01")
                      & temp.sp$Date<=convertStringToPOSIXct("2016-05-31")] <- NA # Inject NAs
  #View(temp.sp[temp.sp$Station.Code==s,])
  for ( i in 1:length(days) ){
    print(days[i])
    a <- idw(Temperature ~ 1,
             temp.sp[temp.sp$Date==days[i] & !is.na(temp.sp$Temperature),], # IN-SAMPLE
             # temp.sp[temp.sp$Station.Code!=s & temp.sp$Date==days[i],], # OUT_OF_SAMPLE
             newdata=test[test$Date==days[i],],
             idp=2.0, debug.level=0)
    test$Temperature2[test$Date==days[i]] <- a$var1.pred
  }
  plot(Temperature~Date,test, type="l")
  lines(Temperature2~Date,test, col=2, lty="dashed")
  ## Out-of-sample R^2 and bias
  cor(test$Temperature,test$Temperature2,use="pairwise.complete.obs")^2
  mean(test$Temperature2-test$Temperature,na.rm=T)
  ## Kriging
  ## NOTE(review): 'newStation' is never defined in this script; this loop
  ## would error on predict() if the if(F) guard were removed.
  for ( i in 1:length(days) ){
    print(days[i])
    sampleVar <- variogram(Temperature~1,temp.sp[temp.sp$Date==days[i],], cutoff=500)
    #plot(sampleVar)
    modelVar <- fit.variogram(sampleVar, vgm("Exp"))
    #plot(sampleVar, modelVar)
    k <- gstat(formula=Temperature~1, loc=temp.sp[temp.sp$Date==days[i],], model=modelVar)
    kp <- predict(k,
                  newStation[newStation$Station.Code=="083-1021" & newStation$Date==days[i],])
    #spplot(kp)
    newStation$Temperature2[newStation$Date==days[i]] <- kp$var1.pred
  }
  lines(Temperature2~Date,newStation, lty="dashed", col=3)
}
## Held-out test series for station s (duplicate of the block earlier in this file)
test <- temp[temp$Station.Code==s,]
test <- fillMissingDates(test)
test <- convertDataToSp(test)
#head(coordinates(test))
test$Temperature2 <- NA
## Dead code (if(F)): out-of-sample IDW evaluation
if(F){
  ## IDW (Inverse Distance Weighted) Interpolation
  for( i in 1:length(days) ){
    #print(days[i])
    ## Predict station s from all OTHER stations on this day (out-of-sample)
    a <- idw(Temperature ~ 1,
             temp.sp[temp.sp$Station.Code!=s & temp.sp$Date==days[i],],
             newdata=test[test$Date==days[i],],
             idp=2.0, debug.level=0)
    test$Temperature2[test$Station.Code==s & test$Date==days[i]] <- a$var1.pred
  }
  plot(Temperature~Date,test,type="l")
  lines(Temperature2~Date,test,col=2,lty="dashed")
  ## BIAS: mean prediction error
  mean(test$Temperature2-test$Temperature, na.rm=T)
}
### Imputation stage 1: Impute stations with less than 100 NAs by station
### using IDW interpolation
## (Duplicate of the stage-1 section earlier in this file; same fixes applied.)
tempNasByStationS1 <- tempNasByStation[tempNasByStation$Temperature>0 & tempNasByStation$Temperature<100,]
## Flag the cells that are about to be imputed (0 = observed, 1 = imputed)
cali.sp$TemperatureFlag <- 0
cali.sp$TemperatureFlag[cali.sp$Station.Code %in% tempNasByStationS1$Station.Code
                        & is.na(cali.sp$Temperature)] <- 1 # Imputation method
for(station in tempNasByStationS1$Station.Code){
  print(station)
  ## FIX(review): local name instead of clobbering the global 'days' sequence
  station.days <- cali.sp$Date[cali.sp$Station.Code==station
                               & !is.na(cali.sp$TemperatureFlag) & cali.sp$TemperatureFlag==1]
  ## FIX(review): seq_along() instead of 1:length() (safe when no dates are flagged)
  for (i in seq_along(station.days)){
    a <- idw(Temperature ~ 1,
             temp.sp[temp.sp$Date==station.days[i],],
             newdata=cali.sp[cali.sp$Station.Code==station & cali.sp$Date==station.days[i],],
             idp=2.0, debug.level=0)
    cali.sp$Temperature[cali.sp$Station.Code==station & cali.sp$Date==station.days[i]] <- a$var1.pred
  }
}
#View(cali.sp[,c("Station.Code","Date","Ozone","Temperature","TemperatureIDW")])
## Test
plot(Temperature~Date,cali.sp[cali.sp$Station.Code==cali.sp$Station.Code[1],], col=0,
ylim=range(cali.sp$Temperature,na.rm=T))
for(station in tempNasByStationS1$Station.Code[tempNasByStationS1$Temperature>0
& tempNasByStationS1$Temperature<100]){
#plot(Temperature~Date,cali.sp[cali.sp$Station.Code==station,], type="l")
lines(Temperature~Date,cali.sp[cali.sp$Station.Code==station,], type="l")
points(Temperature~Date,cali.sp[cali.sp$Station.Code==station
& !is.na(cali.sp$TemperatureIM==1),], col=2, pch=18)
#readline("Continue?")
}
ggplot(cali.sp@data) +
geom_line(aes(Date,Temperature, group=Station.Code, colour=(TemperatureFlag==0)), alpha=0.5) +
theme(legend.position="none")
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Interpolate Unavailable Temperature stations ####
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Annual IDW interpolation (test)
## Disabled experiment (if(F)): interpolate the annual mean temperature
## over a raster of California in UTM-11 and overlay the station network.
if(F){
  annualMeanTemperature <- aggregate(Temperature~Station.Code, temp, mean)
  #View(annualMeanTemperature)
  annualMeanTemperature <- merge(annualMeanTemperature, sites[,c("Station.Code","x","y")], all.x=T)
  coordinates(annualMeanTemperature)<-~x+y
  proj4string(annualMeanTemperature) <- utm11
  mapCa.utm <- spTransform(mapCA, utm11)
  mapCa.utm.raster <- raster(mapCa.utm, res=10);
  coordinates(temp.sites.all) <-~Longitude+Latitude
  proj4string(temp.sites.all) <- "+proj=longlat"
  temp.sites.all <- spTransform(temp.sites.all, utm11)
  # gstat with no model performs IDW when passed to raster::interpolate.
  gs <- gstat(formula=Temperature~1, locations=annualMeanTemperature)
  out <- interpolate(mapCa.utm.raster, gs)
  out <- mask(out, mapCa.utm)
  plot(out)
  points(temp.sites.all, pch=3, cex=0.5, col=temp.sites.all$Type)
}
### Imputation stage 2: Interpolate unavailable stations
### using IDW interpolation
## Stations with more than 100 missing days get their entire series
## interpolated, one day at a time, from the observed network.
tempNasByStationS2 <- tempNasByStation[tempNasByStation$Temperature>100,]
#cali.sp.new <- cali.sp[cali.sp$Station.Code %in% tempNasByStationS2$Station.Code,]
days <- sort(unique(cali.sp$Date))
for ( i in 1:length(days) ){
  #print(days[i])
  a <- idw(Temperature ~ 1,
           temp.sp[temp.sp$Date==days[i],],
           newdata=cali.sp[cali.sp$Station.Code %in% tempNasByStationS2$Station.Code & cali.sp$Date==days[i],],
           idp=2.0, debug.level=0)
  cali.sp$Temperature[cali.sp$Station.Code %in% tempNasByStationS2$Station.Code & cali.sp$Date==days[i]] <- a$var1.pred
}
## Test
ggplot(cali.sp[cali.sp$Station.Code %in% tempNasByStationS2$Station.Code,]@data) +
  geom_line(aes(Date,Temperature, colour=Station.Code), alpha=0.5) +
  theme(legend.position="none")
levelplot(Temperature~Date*Station.Code,cali.sp@data,
          cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
          scales=list(y=list(cex=.7)))
ggplot(cali.sp@data) +
  geom_line(aes(Date,Temperature, colour=Station.Code), alpha=0.5) +
  theme(legend.position="none")
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Impute Wind speed ####
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Count NAs per station (na.action=na.pass keeps stations whose series
## contain NAs) and build a spatial copy of the wind data in UTM-11.
varNasByStation <- aggregate(Wind.speed~Station.Code,cali,function(x){sum(is.na(x))}, na.action=na.pass)
varNasByStation <- varNasByStation[varNasByStation$Wind.speed>0,]
#View(varNasByStation)
wind.sp <- wind
wind.sp <- merge(wind.sp, sites[,c("Station.Code","x","y")], all.x=T)
coordinates(wind.sp) <- ~x+y
proj4string(wind.sp) <- utm11
## Visualize Missing and Available Stations
naStations <- varNasByStation[varNasByStation$Wind.speed>100,1]
var.sites.na <-
  merge(data.frame(Station.Code=naStations),
        sites[,c("Station.Code","Latitude","Longitude","Datum","Elevation",
                 "Location.Setting")], all.x=T)
var.sites.all <- rbind(cbind(wind.sites,Type="Available"),
                       cbind(var.sites.na,Type="Missing"))
ggplot(mapCA) +
  geom_polygon(aes(x = long, y = lat, group = group), fill = "white", colour = "black") +
  geom_point(data = var.sites.all,
             aes(x = Longitude, y = Latitude, fill=Type),
             alpha = .75, shape=21, size=2) +
  labs(x = "Longitude", y = "Latitude", fill = "Type") +
  coord_quickmap() +
  theme(legend.justification = c("right", "top"), legend.position = c(.95, .95),
        legend.box.background = element_rect(), legend.box.margin = margin(6, 6, 6, 6))
# Test: 001-2005
print(levelplot(Wind.speed~Date*Station.Code,wind,
                cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
                scales=list(y=list(cex=.7))))
print(levelplot(Wind.speed~Date*Station.Code,cali,
                cuts=10,col.regions=rev(brewer.pal(11,"Spectral")),
                scales=list(y=list(cex=.7))))
## Test
## Disabled experiments (if(F)): try IDW and kriging on log wind speed for
## one held-out station. Both were judged failures (see NOTEs below).
theStation <- "089-3003"
if(F){
  ggplot(mapCA) +
    geom_polygon(aes(x = long, y = lat, group = group), fill = "white", colour = "black") +
    geom_point(data = var.sites.all,
               aes(x = Longitude, y = Latitude, fill=Type),
               alpha = .75, shape=21, size=2) +
    geom_point(data = var.sites.all[var.sites.all$Station.Code==theStation,],
               aes(x = Longitude, y = Latitude),
               alpha = .75, shape=21, size=4, fill="red") +
    labs(x = "Longitude", y = "Latitude", fill = "Type") +
    coord_quickmap() +
    theme(legend.justification = c("right", "top"), legend.position = c(.95, .95),
          legend.box.background = element_rect(), legend.box.margin = margin(6, 6, 6, 6))
  ## IDW
  days <- seq(from=min(cali.sp$Date), to=max(cali.sp$Date),by='days' )
  cali.sp$Wind.speed.log2 <- NA
  cali.sp$Wind.speed.log <- log(cali.sp$Wind.speed)
  wind.sp$Wind.speed.log <- log(wind.sp$Wind.speed)
  for ( i in 1:length(days) ){
    print(days[i])
    a <- idw(Wind.speed.log ~ 1,
             wind.sp[wind.sp$Date==days[i] & wind.sp$Station.Code!=theStation
                     & !is.na(wind.sp$Wind.speed),],
             newdata=cali.sp[cali.sp$Station.Code==theStation
                             & cali.sp$Date==days[i],],
             idp=2.0, debug.level=0)
    cali.sp$Wind.speed.log2[cali.sp$Station.Code==theStation
                            & cali.sp$Date==days[i]] <- a$var1.pred
  }
  plot(Wind.speed.log~Date,cali.sp[cali.sp$Station.Code==theStation,], type="l")
  lines(Wind.speed.log2~Date,cali.sp[cali.sp$Station.Code==theStation,], col=2, lty="dashed")
  ## Notes: Fail!!!
  ## Semivariogram
  for ( i in 1:length(days) ){
    sampleVar <- variogram(Wind.speed.log~1,cali.sp[cali.sp$Date==days[i] & !is.na(cali.sp$Wind.speed),], cutoff=500)
    if(i==1) plot(gamma~dist,sampleVar,type="l")
    lines(gamma~dist,sampleVar,col=i)
  }
  sampleVar <- variogram(Wind.speed.log~1,cali.sp[!is.na(cali.sp$Wind.speed),], cutoff=500)
  plot(sampleVar)
  modelVar <- fit.variogram(sampleVar, vgm(150, "Exp", 400, nugget = 1))
  ## NOTE: singular model in variogram fit
  sampleVar <- variogram(Wind.speed.log~Temperature,cali.sp[cali.sp$Date==days[1] & !is.na(cali.sp$Wind.speed.log),], cutoff=500)
  plot(sampleVar)
  modelVar <- fit.variogram(sampleVar, vgm(0.5, "Exp", 50, nugget = 0))
  plot(sampleVar, modelVar)
  ## Kriging
  cali.sp$Wind.speed.log3 <- NA
  for ( i in 1:length(days) ){
    print(days[i])
    sampleVar <- variogram(Wind.speed.log~Temperature,cali.sp[cali.sp$Date==days[i] & !is.na(cali.sp$Wind.speed.log),], cutoff=500)
    #plot(sampleVar);
    modelVar <- fit.variogram(sampleVar, vgm(0.5, "Exp", 50, nugget = 0))
    #plot(sampleVar, modelVar)
    # NOTE(review): wind.sp is subset with a cali.sp mask here -- the two
    # objects likely differ in row order/length; confirm this indexing.
    k <- gstat(formula=Wind.speed~Temperature, loc=wind.sp[cali.sp$Date==days[i] & !is.na(cali.sp$Wind.speed.log),], model=modelVar)
    kp <- predict(k, cali.sp[cali.sp$Station.Code==theStation & cali.sp$Date==days[i],])
    #spplot(kp)
    # NOTE(review): assignment is not restricted to theStation, unlike the
    # IDW loop above -- every station's row for this day gets the value.
    cali.sp$Wind.speed.log3[cali.sp$Date==days[i]] <- kp$var1.pred
  }
  plot(Wind.speed.log~Date,cali.sp[cali.sp$Station.Code==theStation,], type="l")
  lines(Wind.speed.log3~Date,cali.sp[cali.sp$Station.Code==theStation,], lty="dashed", col=3)
  ## NOTE: FAIL!
}
### Imputation stage 1: Impute stations with less than 100 NAs by station
### using IDW interpolation
## (Duplicate of the earlier stage-1 block.) Rows to be imputed are first
## marked with TemperatureFlag == 1 so imputed values remain identifiable.
tempNasByStationS1 <- tempNasByStation[tempNasByStation$Temperature>0 & tempNasByStation$Temperature<100,]
cali.sp$TemperatureFlag <- 0
cali.sp$TemperatureFlag[cali.sp$Station.Code %in% tempNasByStationS1$Station.Code
                        & is.na(cali.sp$Temperature)] <- 1 # Imputation method
for(station in tempNasByStationS1$Station.Code){
  print(station)
  #days <- cali.sp$Date[cali.sp$Station.Code==station & is.na(cali.sp$Temperature)]
  days <- cali.sp$Date[cali.sp$Station.Code==station
                       & !is.na(cali.sp$TemperatureFlag) & cali.sp$TemperatureFlag==1]
  #print(days)
  # Fill each flagged day from all stations observed on that day.
  for ( i in 1:length(days) ){
    #print(days[i])
    a <- idw(Temperature ~ 1,
             temp.sp[temp.sp$Date==days[i],],
             newdata=cali.sp[cali.sp$Station.Code==station & cali.sp$Date==days[i],],
             idp=2.0, debug.level=0)
    cali.sp$Temperature[cali.sp$Station.Code==station & cali.sp$Date==days[i]] <- a$var1.pred
  }
}
#View(cali.sp[,c("Station.Code","Date","Ozone","Temperature","TemperatureIDW")])
## Test
plot(Temperature~Date,cali.sp[cali.sp$Station.Code==cali.sp$Station.Code[1],], col=0,
     ylim=range(cali.sp$Temperature,na.rm=T))
for(station in tempNasByStationS1$Station.Code[tempNasByStationS1$Temperature>0
                                               & tempNasByStationS1$Temperature<100]){
  #plot(Temperature~Date,cali.sp[cali.sp$Station.Code==station,], type="l")
  lines(Temperature~Date,cali.sp[cali.sp$Station.Code==station,], type="l")
  # NOTE(review): '!is.na(cali.sp$TemperatureIM==1)' looks suspect -- the
  # flag column created above is TemperatureFlag, and '!is.na(x==1)' is
  # TRUE for any non-NA value. Confirm intended column and predicate.
  points(Temperature~Date,cali.sp[cali.sp$Station.Code==station
                                  & !is.na(cali.sp$TemperatureIM==1),], col=2, pch=18)
  #readline("Continue?")
}
ggplot(cali.sp@data) +
  geom_line(aes(Date,Temperature, group=Station.Code, colour=(TemperatureFlag==0)), alpha=0.5) +
  theme(legend.position="none")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate.basis.R
\name{generate.basis}
\alias{generate.basis}
\title{generate hermite basis}
\usage{
generate.basis(X, order = 3)
}
\arguments{
\item{X}{an n by p matrix containing the input features}
\item{order}{order of hermite polynomials that we will use}
}
\value{
an n by q matrix, which is a basis expansion of the input X
}
\description{
Generates hermite basis for a given order, which we use for
non-parametric regression
}
|
/man/generate.basis.Rd
|
no_license
|
clu0/diffindiff
|
R
| false
| true
| 513
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate.basis.R
\name{generate.basis}
\alias{generate.basis}
\title{generate hermite basis}
\usage{
generate.basis(X, order = 3)
}
\arguments{
\item{X}{an n by p matrix containing the input features}
\item{order}{order of hermite polynomials that we will use}
}
\value{
an n by q matrix, which is a basis expansion of the input X
}
\description{
Generates hermite basis for a given order, which we use for
non-parametric regression
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## This function get as input a matrix
## and creates a special "matrix" object that can cache its inverse
## In fact a list object with 4 elements is created.
## The elements are 4 functions set, get, setinvX, getinvX which do respectively:
## set: set the variable x equal to y and invX equal to NULL (in this case
## an empty matrix)
## get: return the matrix
## setinvX: store the value of the inverse matrix calculated in cacheSolve and passed
## to setinvX through the variable invMatrix
## getinvX: returns the inverse of the matrix (or NULL if the matrix was not yet
## inverted)
## <<- (superassignment)
## cit. "this operator looks back in the enclosing environments for an environment that contains
## the symbol on the left side and when it finds such an environment it replaces
## the value in that environment with the one on the right side
## get return the to be inverted matrix" (An Introduction to R, pg. 47, W. N. Venables,
## D. M. Smith and the R Core Team) As specified in the same tutorial, this operator
## has usually the effect to create a global variable
## and assign the value of the right hand side to it
## Constructor for a matrix wrapper that can memoize its inverse.
##
## Returns a list of four accessor closures that share this call's
## environment:
##   set(y)      -- replace the stored matrix and drop any cached inverse
##   get()       -- return the stored matrix
##   setinvX(m)  -- store a computed inverse (used by cacheSolve)
##   getinvX()   -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  replace_matrix <- function(y) {
    # Superassignment writes into the constructor's frame, so every
    # closure created below observes the update.
    x <<- y
    cached_inverse <<- NULL
  }
  report_matrix <- function() {
    x
  }
  store_inverse <- function(invMatrix) {
    cached_inverse <<- invMatrix
  }
  report_inverse <- function() {
    cached_inverse
  }
  list(
    set = replace_matrix,
    get = report_matrix,
    setinvX = store_inverse,
    getinvX = report_inverse
  )
}
## Write a short comment describing this function
## This function computes the inverse of the matrix returned by
## makeCacheMatrix or retrives the cached value in case the inverse has been already
## calculated.
## To be precise, the input of the function is the list created in makeCacheMatrix
## whose elements are accessed with the operator $
## Initially the line invX <- x$getinvX() call the function getinvX()
## if invX is not NULL, this means that the inverse was already calculated and
## cached. Therefore, the cached value is retrieved and a message "getting cached data"
## is printed on the console to notify this fact
## Otherwise, if invX is NULL (e.g. the inverse of the matrix was not calculated yet),
## the value of the matrix is get: x$get()
## the inverse is calculated: invX <- solve(myMatrix, ...)
## and cached: x$setinvX(invX)
## finally the inverse matrix is printed to the console: invX
## Return the inverse of the matrix wrapped by 'x' (a makeCacheMatrix
## list), reusing the cached inverse when one exists. Extra arguments
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  memoized <- x$getinvX()
  if (is.null(memoized)) {
    # Nothing cached yet: invert, remember for next time, and return.
    fresh <- solve(x$get(), ...)
    x$setinvX(fresh)
    return(fresh)
  }
  message("getting cached data")
  memoized
}
|
/cachematrix.R
|
no_license
|
AsterAlpestris/ProgrammingAssignment2
|
R
| false
| false
| 2,757
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## This function get as input a matrix
## and creates a special "matrix" object that can cache its inverse
## In fact a list object with 4 elements is created.
## The elements are 4 functions set, get, setinvX, getinvX which do respectively:
## set: set the variable x equal to y and invX equal to NULL (in this case
## an empty matrix)
## get: return the matrix
## setinvX: store the value of the inverse matrix calculated in cacheSolve and passed
## to setinvX through the variable invMatrix
## getinvX: returns the inverse of the matrix (or NULL if the matrix was not yet
## inverted)
## <<- (superassignment)
## cit. "this operator looks back in the enclosing environments for an environment that contains
## the symbol on the left side and when it finds such an environment it replaces
## the value in that environment with the one on the right side
## get return the to be inverted matrix" (An Introduction to R, pg. 47, W. N. Venables,
## D. M. Smith and the R Core Team) As specified in the same tutorial, this operator
## has usually the effect to create a global variable
## and assign the value of the right hand side to it
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of four closures over this call's environment:
##   set(y): replace the matrix and reset the cached inverse to NULL
##   get(): return the matrix
##   setinvX(invMatrix): cache a computed inverse (used by cacheSolve)
##   getinvX(): return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(x = matrix()) {
	invX <- NULL
	set <- function(y){
		x<<-y
		invX <<- NULL
	}
	get <- function() x
	setinvX <- function(invMatrix) invX <<- invMatrix
	getinvX <- function() invX
	list(set=set, get=get, setinvX=setinvX, getinvX = getinvX)
}
## Write a short comment describing this function
## This function computes the inverse of the matrix returned by
## makeCacheMatrix or retrives the cached value in case the inverse has been already
## calculated.
## To be precise, the input of the function is the list created in makeCacheMatrix
## whose elements are accessed with the operator $
## Initially the line invX <- x$getinvX() call the function getinvX()
## if invX is not NULL, this means that the inverse was already calculated and
## cached. Therefore, the cached value is retrieved and a message "getting cached data"
## is printed on the console to notify this fact
## Otherwise, if invX is NULL (e.g. the inverse of the matrix was not calculated yet),
## the value of the matrix is get: x$get()
## the inverse is calculated: invX <- solve(myMatrix, ...)
## and cached: x$setinvX(invX)
## finally the inverse matrix is printed to the console: invX
## Compute the inverse of the matrix wrapped by 'x' (a makeCacheMatrix
## list), returning the cached value when available. Extra arguments are
## forwarded to solve().
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x'
        invX <- x$getinvX()
        if (! is.null(invX)){
          message("getting cached data")
          return(invX)
        }
        myMatrix <- x$get()
        invX <- solve(myMatrix, ...)
        x$setinvX(invX)
        invX
}
|
## Build word-boundary stop-word regexes (English, Spanish, German) from
## the tm package; these globals are used by friendlyUrl() below.
library(tm)
stopWords <- stopwords("en")
stopWordses <- stopwords("es")
stopWordsde <- stopwords("de")
# Alternation of the form "\b(word1|word2|...)\b" per language.
pat <- paste0("\\b(",paste0(stopWords, collapse="|") ,")\\b")
pat_es <- paste0("\\b(", paste0(stopWordses, collapse="|"), ")\\b")
pat_de <- paste0("\\b(", paste0(stopWordsde, collapse="|"), ")\\b")
## Build a URL-friendly slug from free text.
##
## Steps: strip English and German stop words ('pat' and 'pat_de' are
## built at the top of this script from tm::stopwords), replace every
## non-alphanumeric character with 'sep', collapse runs of separators,
## trim leading/trailing separators, lowercase, and truncate to 'max'
## characters.
##
## NOTE(review): the Spanish pattern 'pat_es' is built above but never
## applied here -- confirm whether that is intentional.
## Assumes 'sep' contains no regex metacharacters (true for default "-").
friendlyUrl <- function(text, sep = '-', max = 55) {
  # Remove English stop words.
  url <- gsub(pat, sep, text)
  # Remove German stop words.
  url <- gsub(pat_de, sep, url)
  # Replace non-alphanumeric characters.
  url <- gsub('[^A-Za-z0-9]', sep, url)
  # Collapse any run of separators to a single one. (Fix: the original
  # applied a pairwise "--" -> "-" substitution a fixed number of times,
  # which only handles bounded run lengths.)
  url <- gsub(paste0(sep, '+'), sep, url)
  # Trim leading and trailing separators. (Fix: the original pattern
  # '^-+|-$' hard-coded '-' regardless of 'sep' and trimmed at most one
  # trailing separator.)
  url <- gsub(paste0('^', sep, '+|', sep, '+$'), '', url)
  # Convert to lowercase and trim to max length.
  substr(tolower(url), 1, max)
}
## Apply friendlyUrl() to the 'name' column of the input workbook and
## write the proposed slugs out as xlsx and csv.
library(readxl)
library(xlsx)
url_friendly_germany <- read_excel("~/R/url_friendly_germany.xlsx")
# lapply yields a list column; it is flattened to character below.
url_friendly_germany$url_propuesta <- lapply(url_friendly_germany$name,friendlyUrl)
url_friendly_germany$name<- NULL
new_df <- data.frame(lapply(url_friendly_germany,as.character), stringsAsFactors=FALSE)
write.xlsx(x=new_df, file ="url_friendly_trabajado2.xlsx", sheetName = "URL")
write.csv(new_df, file= "url_friendly_trabajado2.csv")
|
/url_friendly.R
|
no_license
|
gabrielbenitezfml/ml-and-data-analysis
|
R
| false
| false
| 1,520
|
r
|
## Slug-generation script (duplicate copy): builds stop-word regexes from
## the tm package, defines friendlyUrl(), applies it to a spreadsheet of
## names, and writes the results back out.
library(tm)
stopWords <- stopwords("en")
stopWordses <- stopwords("es")
stopWordsde <- stopwords("de")
# Word-boundary alternation per language, e.g. "\b(word1|word2|...)\b".
pat <- paste0("\\b(",paste0(stopWords, collapse="|") ,")\\b")
pat_es <- paste0("\\b(", paste0(stopWordses, collapse="|"), ")\\b")
pat_de <- paste0("\\b(", paste0(stopWordsde, collapse="|"), ")\\b")
## Turn free text into a lowercase, separator-delimited slug of at most
## 'max' characters.
## NOTE(review): pat_es is built above but never used here; the trailing
## trim pattern '^-+|-$' hard-codes '-' instead of 'sep'.
friendlyUrl <- function(text, sep = '-', max = 55) {
  #Remove Stopwords
  url <- gsub(pat,sep,text)
  #Remove GermanStopwords
  url <- gsub(pat_de,sep,url)
  # Replace non-alphanumeric characters.
  url <- gsub('[^A-Za-z0-9]', sep, url)
  # Remove double separators (do this twice, in case of 4 or 3 repeats).
  doubleSep <- paste(sep, sep, sep = '')
  url <- gsub(doubleSep, sep, url)
  url <- gsub(doubleSep, sep, url)
  # NOTE(review): the next three lines repeat the block above verbatim.
  doubleSep <- paste(sep, sep, sep = '')
  url <- gsub(doubleSep, sep, url)
  url <- gsub(doubleSep, sep, url)
  # Trim leading and trailing separators.
  url <- gsub('^-+|-$', '', url)
  # Convert to lowercase and trim to max length.
  substr(tolower(url), 1, max)
}
library(readxl)
library(xlsx)
url_friendly_germany <- read_excel("~/R/url_friendly_germany.xlsx")
# lapply yields a list column; flattened to character before writing.
url_friendly_germany$url_propuesta <- lapply(url_friendly_germany$name,friendlyUrl)
url_friendly_germany$name<- NULL
new_df <- data.frame(lapply(url_friendly_germany,as.character), stringsAsFactors=FALSE)
write.xlsx(x=new_df, file ="url_friendly_trabajado2.xlsx", sheetName = "URL")
write.csv(new_df, file= "url_friendly_trabajado2.csv")
|
## Map each coefficient of a fitted model to the index of the predictor
## (term) it belongs to.
##
## A k-level factor contributes k - 1 dummy coefficients that all share
## one predictor index; a numeric term contributes exactly one.
##
## @param model a fitted model exposing terms, coef() and xlevels
##   (e.g. MASS::polr in the brant test); the coefficient count must
##   match the slots implied by the terms.
## @return data.frame with columns i (coefficient index) and var
##   (1-based predictor index).
getCombiCoefs <- function(model) {
  classes <- attr(model$terms, "dataClasses")
  # Drop the response (first entry); TRUE marks non-numeric (factor) terms.
  # (Replaces the redundant ifelse(cond, T, F) of the original.)
  is_factor <- classes[-1] != "numeric"
  f <- 1    # position within model$xlevels (advances on factors only)
  i <- 1    # current coefficient row
  var <- 1  # current predictor index
  result <- data.frame(i = seq_along(coef(model)), var = NA)
  for (fac in is_factor) {  # renamed: 'factor' shadowed base::factor
    if (fac) {
      # A factor with n levels yields n - 1 dummy coefficients, all
      # mapped to the same predictor index. seq_len() is safe for a
      # degenerate single-level factor (the original 1:(n-1) was not).
      n <- length(unlist(model$xlevels[f]))
      for (j in seq_len(n - 1)) {
        result[i, "var"] <- var
        i <- i + 1
      }
      var <- var + 1
      f <- f + 1
    } else {
      result[i, "var"] <- var
      var <- var + 1
      i <- i + 1
    }
  }
  result
}
|
/R/getCombiCoefs.R
|
no_license
|
davidgoes4wce/brant
|
R
| false
| false
| 517
|
r
|
## Map each model coefficient to the index of the predictor it belongs
## to: a k-level factor contributes k - 1 dummy coefficients sharing one
## index, a numeric term contributes one.
## Returns a data.frame with columns i (coefficient row) and var
## (predictor index).
getCombiCoefs <- function(model){
  classes = attr(model$terms,"dataClasses")
  # Drop the response (first entry); TRUE marks factor terms.
  factors = ifelse(classes[2:length(classes)]!="numeric",T,F)
  f = i = var = 1
  result = data.frame(i=1:length(coef(model)),var=NA)
  for(factor in factors){
    if(factor){
      # n levels => n - 1 dummy coefficients, all mapped to 'var'.
      n = length(unlist(model$xlevels[f]))
      for(j in 1:(n-1)){
        result[i,"var"] = var
        i = i + 1
      }
      var = var + 1
      f = f + 1
    }else{
      result[i,"var"] = var
      var = var + 1
      i = i + 1
    }
  }
  return(result)
}
|
% $Id: rs15-normalize.Rd
\name{normalize}
\alias{normalize,MatrixLike-method}
\title{Normalization}
\description{
This function performs normalization for sample loading after quantification.
It is typically invoked as part of the process of creating summary
information from an \code{RPPASet} object.
}
\usage{
\S4method{normalize}{MatrixLike}(object,
method=getRegisteredNormalizationMethodKeys(),
calc.medians=TRUE,
sweep.cols=calc.medians,
\dots)
}
\arguments{
\item{object}{data frame or matrix to be normalized}
\item{method}{character string specifying name of method of sample loading
normalization (see section \sQuote{Details} below)}
\item{calc.medians}{logical scalar. If \code{TRUE}, calculate row and column
median values from the data to be normalized.}
\item{sweep.cols}{logical scalar. If \code{TRUE}, subtract column medians
from data values prior to invoking the normalization method.}
\item{\dots}{extra arguments for normalization routines}
}
\details{
By default, column medians are subtracted from the input data values; these
adjusted data values are then passed to the requested normalization routine
for further processing.
The \code{method} argument may be augmented with user-provided normalization
methods. Package-provided values are:
\tabular{lll}{
medpolish \tab Tukey's median polish normalization\cr
median \tab sample median normalization\cr
house \tab housekeeping normalization\cr
vs \tab variable slope normalization\cr
none \tab no normalization done\cr
}
Specifying \dQuote{median} as the \code{method} argument causes the row
median to be subtracted from each sample. Specifying \dQuote{house} causes
the median of one or more housekeeping antibodies to be used. The names of
the antibodies to be used must be supplied as a named argument to this
method. Specifying \dQuote{vs} causes the sample median to be used along
with a multiplicative gamma (see reference below).
}
\value{
Returns normalized concentrations as matrix appropriately annotated.
}
\author{
P. Roebuck \email{paul_roebuck@comcast.net},
E. Shannon Neeley \email{sneeley@stat.byu.edu},
James M. Melott \email{jmmelott@mdanderson.org}
}
\seealso{
\code{\linkS4class{RPPASet}}
}
\keyword{smooth}
|
/RPPASPACE/man/rs15-normalize.Rd
|
no_license
|
MD-Anderson-Bioinformatics/rppaspace
|
R
| false
| false
| 2,352
|
rd
|
% $Id: rs15-normalize.Rd
\name{normalize}
\alias{normalize,MatrixLike-method}
\title{Normalization}
\description{
This function performs normalization for sample loading after quantification.
It is typically invoked as part of the process of creating summary
information from an \code{RPPASet} object.
}
\usage{
\S4method{normalize}{MatrixLike}(object,
method=getRegisteredNormalizationMethodKeys(),
calc.medians=TRUE,
sweep.cols=calc.medians,
\dots)
}
\arguments{
\item{object}{data frame or matrix to be normalized}
\item{method}{character string specifying name of method of sample loading
normalization (see section \sQuote{Details} below)}
\item{calc.medians}{logical scalar. If \code{TRUE}, calculate row and column
median values from the data to be normalized.}
\item{sweep.cols}{logical scalar. If \code{TRUE}, subtract column medians
from data values prior to invoking the normalization method.}
\item{\dots}{extra arguments for normalization routines}
}
\details{
By default, column medians are subtracted from the input data values; these
adjusted data values are then passed to the requested normalization routine
for further processing.
The \code{method} argument may be augmented with user-provided normalization
methods. Package-provided values are:
\tabular{lll}{
medpolish \tab Tukey's median polish normalization\cr
median \tab sample median normalization\cr
house \tab housekeeping normalization\cr
vs \tab variable slope normalization\cr
none \tab no normalization done\cr
}
Specifying \dQuote{median} as the \code{method} argument causes the row
median to be subtracted from each sample. Specifying \dQuote{house} causes
the median of one or more housekeeping antibodies to be used. The names of
the antibodies to be used must be supplied as a named argument to this
method. Specifying \dQuote{vs} causes the sample median to be used along
with a multiplicative gamma (see reference below).
}
\value{
Returns normalized concentrations as matrix appropriately annotated.
}
\author{
P. Roebuck \email{paul_roebuck@comcast.net},
E. Shannon Neeley \email{sneeley@stat.byu.edu},
James M. Melott \email{jmmelott@mdanderson.org}
}
\seealso{
\code{\linkS4class{RPPASet}}
}
\keyword{smooth}
|
## Compute the cyclic mass spectrum of a peptide given as a single
## string of one-letter amino acid codes, e.g. runcyclicspectrum("NQEL").
## Uses the integer mass table defined elsewhere in this file.
runcyclicspectrum <- function(peptide) {
  mass_table <- integermasstable()
  residues <- strsplit(peptide, "")[[1]]
  cyclicspectrum(residues, mass_table$proteins, mass_table$intmass)
}
## Theoretical cyclic mass spectrum of a cyclic peptide.
##
## @param peptide character vector of single-letter amino acid codes,
##   one element per residue.
## @param aminoacid character vector of amino acid letters; for every i,
##   aminoacidmass[i] is the mass of aminoacid[i].
## @param aminoacidmass numeric vector of the corresponding masses.
## @return sorted numeric vector of subpeptide masses, including 0 and
##   the full peptide mass.
cyclicspectrum <- function(peptide, aminoacid, aminoacidmass) {
  # prefixmass[k + 1] = total mass of the first k residues. match()
  # looks each residue up in the mass table, generalizing the original
  # hard-coded scan over exactly 20 amino acids.
  prefixmass <- c(0, cumsum(aminoacidmass[match(peptide, aminoacid)]))
  peptidemass <- prefixmass[length(prefixmass)]
  cyclspec <- 0  # the empty subpeptide always contributes mass 0
  np <- length(peptide)
  for (i in seq_len(np)) {
    for (j in (i + 1):(np + 1)) {
      # Mass of the linear subpeptide peptide[i..j-1].
      cyclspec <- c(cyclspec, prefixmass[j] - prefixmass[i])
      # Every proper interior subpeptide also has a cyclic complement
      # that wraps around the join of the cycle.
      if (i > 1 && j < (np + 1)) {
        cyclspec <- c(cyclspec, peptidemass - (prefixmass[j] - prefixmass[i]))
      }
    }
  }
  sort(cyclspec)
}
## Lookup table of the 20 standard amino acids and their integer masses
## (I/L and K/Q share masses 113 and 128 respectively).
## @return data.frame with character column 'proteins' and integer
##   column 'intmass'.
integermasstable <- function() {
  proteins <- c('G', 'A', 'S', 'P', 'V', 'T', 'C', 'I', 'L', 'N', 'D', 'K',
                'Q', 'E', 'M', 'H', 'F', 'R', 'Y', 'W')
  intmass <- c(57L, 71L, 87L, 97L, 99L, 101L, 103L, 113L, 113L, 114L, 115L,
               128L, 128L, 129L, 131L, 137L, 147L, 156L, 163L, 186L)
  # Build the data frame directly. (The original went through cbind(),
  # which coerces everything to a character matrix and then needed an
  # as.integer() round trip to restore the mass column.)
  data.frame(proteins = proteins, intmass = intmass,
             stringsAsFactors = FALSE)
}
|
/cyclicspectrum.R
|
no_license
|
HomerJSimpson1/BioinfAlgos
|
R
| false
| false
| 1,659
|
r
|
## Driver (duplicate copy): split a peptide string into residues and
## compute its cyclic mass spectrum with the table defined below.
runcyclicspectrum <- function(peptide) {
	imt <- integermasstable()
	peptide <- strsplit(peptide,"")[[1]]
	cyclicspec <- cyclicspectrum(peptide, imt$proteins, imt$intmass)
	return(cyclicspec)
}
## Theoretical cyclic spectrum of a cyclic peptide: masses of all
## subpeptides (including wrap-around ones), plus 0 and the full
## peptide mass, sorted ascending.
cyclicspectrum <- function(peptide, aminoacid, aminoacidmass) {
	## peptide is the peptide for which we are determining the sequence
	## aminoacid is a vector of amino acids, and the aminoacidmass
	## parameter is a vector of amino acid masses. This is
	## structured so that for all i in aminoacid, the mass of aminoacid[i]
	## equals aminoacidmass[i].
	# prefixmass[k + 1] holds the mass of the first k residues; the inner
	# scan assumes exactly 20 entries in the amino acid table.
	prefixmass <- rep(0,1)
	for (i in 1:length(peptide)) {
		for (j in 1:20) {
			if(aminoacid[j] == peptide[i]) {
				if (i > 1)
					prefixmass <- c(prefixmass, prefixmass[i] + aminoacidmass[j])
				else
					prefixmass <- c(prefixmass, aminoacidmass[j])
			}
		}
	}
	peptidemass <- prefixmass[length(prefixmass)]
	cyclspec <- rep(0,1)
	for (i in 1:length(peptide)) {
		for (j in (i + 1):(length(peptide) + 1)) {
			# Linear subpeptide peptide[i..j-1].
			cyclspec <- c(cyclspec, prefixmass[j] - prefixmass[i])
			# Proper interior subpeptides also have a wrap-around complement.
			if (i > 1 && j < (length(peptide) + 1))
				cyclspec <- c(cyclspec, (peptidemass - (prefixmass[j] - prefixmass[i])))
		}
	}
	return(sort(cyclspec))
}
## Table of the 20 standard amino acids and their integer masses
## (I/L and K/Q share masses 113 and 128 respectively).
integermasstable <- function() {
	proteins <- c('G', 'A', 'S', 'P', 'V', 'T', 'C', 'I', 'L', 'N', 'D', 'K', 'Q', 'E', 'M', 'H', 'F', 'R', 'Y', 'W')
	intmass <- as.integer(c(57, 71, 87, 97, 99, 101, 103, 113, 113, 114, 115, 128, 128, 129, 131, 137, 147, 156, 163, 186))
	df <- data.frame(cbind(proteins, intmass), stringsAsFactors=FALSE)
	df$intmass <- as.integer(df$intmass)
	return(df)
}
|
#' @name runifm
#' @title Create matrix of random values drawn from uniform distribution
#' @param nrow number of rows
#' @param ncol number of columns
#' @param min lower limit of the distribution. Must be finite.
#' @param max upper limit of the distribution. Must be finite.
#' @importFrom stats runif
#' @return a matrix
#' @examples
#' runifm(3, 3)
#' runifm(4, 5, min = -1, max = 3)
#' @export
runifm <- function(nrow, ncol, min = 0, max = 1){
  n <- nrow * ncol
  matrix(runif(n = n, min = min, max = max), nrow = nrow, ncol = ncol)
}
#' @name rboolm
#' @title Create matrix of randomly chosen boolean values
#' @param nrow number of rows
#' @param ncol number of columns
#' @param true.proba probability of true values; default: 0.5
#' @importFrom stats runif
#' @return a matrix
#' @examples
#' rboolm(3, 3)
#' rboolm(4, 5, true.proba = 0.3)
#' @export
rboolm <- function(nrow, ncol, true.proba = 0.5){
  n <- nrow * ncol
  # Each cell is TRUE with probability true.proba (uniform draw on [0, 1]).
  matrix(runif(n = n), nrow = nrow, ncol = ncol) <= true.proba
}
#' @name runif_same_dims
#' @title Create matrix of random values with dimensions copied from an existing matrix
#' @param mat matrix
#' @param min lower limit of the distribution. Must be finite.
#' @param max upper limit of the distribution. Must be finite.
#' @importFrom stats runif
#' @return a matrix
#' @examples
#' mat <- matrix(0, 3, 3)
#' runif_same_dims(mat)
#' @export
runif_same_dims <- function(mat, min = 0, max = 1){
  # length(mat) = nrow * ncol, so one draw per cell of the template.
  data <- runif(length(mat), min = min, max = max)
  matrix(data = data, nrow = nrow(mat), ncol = ncol(mat))
}
|
/R/random-matrix.R
|
permissive
|
krzjoa/matricks
|
R
| false
| false
| 1,539
|
r
|
#' @name runifm
#' @title Create matrix of random values drawn from uniform distribution
#' @param nrow number of rows
#' @param ncol number of columns
#' @param min lower limit of the distribution. Must be finite.
#' @param max upper limit of the distribution. Must be finite.
#' @importFrom stats runif
#' @return a matrix
#' @examples
#' runifm(3, 3)
#' runifm(4, 5, min = -1, max = 3)
#' @export
runifm <- function(nrow, ncol, min = 0, max = 1){
  n <- nrow * ncol
  matrix(runif(n = n, min = min, max = max), nrow = nrow, ncol = ncol)
}
#' @name rboolm
#' @title Create matrix of randomly chosen boolean values
#' @param nrow number of rows
#' @param ncol number of columns
#' @param true.proba probability of true values; default: 0.5
#' @importFrom stats runif
#' @return a matrix
#' @examples
#' rboolm(3, 3)
#' rboolm(4, 5, true.proba = 0.3)
#' @export
rboolm <- function(nrow, ncol, true.proba = 0.5){
  n <- nrow * ncol
  # Each cell is TRUE with probability true.proba (uniform draw on [0, 1]).
  matrix(runif(n = n), nrow = nrow, ncol = ncol) <= true.proba
}
#' @name runif_same_dims
#' @title Create matrix of random values with dimensions copied from an existing matrix
#' @param mat matrix
#' @param min lower limit of the distribution. Must be finite.
#' @param max upper limit of the distribution. Must be finite.
#' @importFrom stats runif
#' @return a matrix
#' @examples
#' mat <- matrix(0, 3, 3)
#' runif_same_dims(mat)
#' @export
runif_same_dims <- function(mat, min = 0, max = 1){
  # length(mat) = nrow * ncol, so one draw per cell of the template.
  data <- runif(length(mat), min = min, max = max)
  matrix(data = data, nrow = nrow(mat), ncol = ncol(mat))
}
|
## Setup libraries =============================================================
library(tidyverse)
library(voterdiffR)
## Initialize blocks and summary dataframe =====================================
## NOTE(review): 'varlist' (and later 'threshold_prop', 'cutoff',
## 'summ_final', 'vl', 'vl_dedup', the path_* and *_prefix variables, and
## 'date_df' used in the load() below before it is assigned) are not
## defined in this script -- presumably set by an earlier setup script.
init <- block_init(
  varlist,
  firstname = "szNameFirst",
  gender = "gender",
  address_partial = c("sHouseNum", "szStreetName"),
  address_full = "szSitusAddress",
  exceptions = list(c("gender", "szNameLast"))
)
## Load the initial dataframe and assess reduction ratio =======================
### We can ignore warning on rowwise data structure being stripped
load(paste0(path_clean, clean_prefix, date_df[1, ]$date_label, ".Rda"))
prematch_output <- suppressWarnings(
  block_prematch(df, init$blocks, init$summ_df)
)
summ_df <- prematch_output$summ_df %>%
  ### If we are using phones or emails, always block by gender
  ### Because families tend to share them
  filter(
    comparison < (nrow(df) * threshold_prop) &
      !(gender == 0 & szNameFirst == 0 & szPhone == 1) &
      !(gender == 0 & szNameFirst == 0 & szEmailAddress == 1)
  ) %>%
  ### Blocks with no pairs have no use
  filter(comparison > 0) %>%
  mutate(row = row_number()) %>%
  select(row, everything())
## Prepare to match block-by-block =============================================
fastprep_output <- block_fastprep(df, prematch_output, summ_df)
# Sanity check: no record may appear in both mismatch sets of any block.
# NOTE(review): assert_that() comes from the assertthat package, which is
# not attached here -- confirm voterdiffR re-exports it.
assert_that(
  sum(
    fastprep_output$fastprep %>%
      map(~ nrow(inner_join(.x$mismatch_A, .x$mismatch_B))) %>%
      unlist()
  ) == 0
)
date_df <- snapshot_list()
# Deduplicate every snapshot; per-snapshot results are collected first.
dups <- list(
  by_snapshot = vrdedup(
    threshold = cutoff,
    summ_final = summ_final,
    date_df = date_df,
    varlist = vl_dedup,
    path_clean = path_clean,
    path_dedup = path_dedup,
    id_var = vl$id[1],
    dedup_prefix = dedup_prefix,
    clean_prefix = clean_prefix,
    extra_vars = "gender",
    exist_files = TRUE
  )
)
dups$by_type <- dups_type(
dups$by_snapshot, date_df, path_clean, path_dedup,
clean_prefix, id_var = vl$id[1]
)
save(dups_final, file = paste0(path_dedup, "dups_type.Rda"))
|
/dedup.R
|
no_license
|
monitoringtheelection/auditingVR
|
R
| false
| false
| 2,092
|
r
|
## Setup libraries =============================================================
library(tidyverse)
library(voterdiffR)
## Initialize blocks and summary dataframe =====================================
# NOTE(review): `varlist` is not defined in this script -- presumably created
# by an upstream setup script; confirm before running standalone.
init <- block_init(
  varlist,
  firstname = "szNameFirst",
  gender = "gender",
  address_partial = c("sHouseNum", "szStreetName"),
  address_full = "szSitusAddress",
  exceptions = list(c("gender", "szNameLast"))
)
## Load the initial dataframe and assess reduction ratio =======================
### We can ignore warning on rowwise data structure being stripped
# NOTE(review): `date_df` is read here but only assigned further below via
# snapshot_list() -- it must already exist in the calling environment; confirm.
load(paste0(path_clean, clean_prefix, date_df[1, ]$date_label, ".Rda"))
prematch_output <- suppressWarnings(
  block_prematch(df, init$blocks, init$summ_df)
)
summ_df <- prematch_output$summ_df %>%
  ### If we are using phones or emails, always block by gender
  ### Because families tend to share them
  filter(
    comparison < (nrow(df) * threshold_prop) &
      !(gender == 0 & szNameFirst == 0 & szPhone == 1) &
      !(gender == 0 & szNameFirst == 0 & szEmailAddress == 1)
  ) %>%
  ### Blocks with no pairs have no use
  filter(comparison > 0) %>%
  mutate(row = row_number()) %>%
  select(row, everything())
## Prepare to match block-by-block =============================================
fastprep_output <- block_fastprep(df, prematch_output, summ_df)
# Sanity check: no pair may appear in both mismatch sets of any block.
# NOTE(review): assert_that() belongs to the 'assertthat' package, which is
# not attached here -- presumably re-exported by voterdiffR; confirm.
assert_that(
  sum(
    fastprep_output$fastprep %>%
      map(~ nrow(inner_join(.x$mismatch_A, .x$mismatch_B))) %>%
      unlist()
  ) == 0
)
## Deduplicate snapshot-by-snapshot, then classify duplicate types =============
date_df <- snapshot_list()
dups <- list(
  by_snapshot = vrdedup(
    threshold = cutoff,
    summ_final = summ_final,
    date_df = date_df,
    varlist = vl_dedup,
    path_clean = path_clean,
    path_dedup = path_dedup,
    id_var = vl$id[1],
    dedup_prefix = dedup_prefix,
    clean_prefix = clean_prefix,
    extra_vars = "gender",
    exist_files = TRUE
  )
)
dups$by_type <- dups_type(
  dups$by_snapshot, date_df, path_clean, path_dedup,
  clean_prefix, id_var = vl$id[1]
)
## BUG FIX: the object assembled above is `dups`; `dups_final` is never
## defined in this script, so save() would fail with "object not found".
save(dups, file = paste0(path_dedup, "dups_type.Rda"))
|
# ------------------------------------------------------------------------------
# Libraries
library(dplyr)
library(readr)
library(stringr)
library(caret)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Read, convert and subset the data
load("../Data/data_train.rda")
load("../Data/data_test.rda")
# NOTE(review): dplyr::matches() treats "pix*" as a regular expression
# ("pi" followed by zero or more "x"), not a glob; "^pix" would be more
# precise -- confirm against the pixel column names.
X_train <- data_train %>% dplyr::select(dplyr::matches("pix*"))
X_test <- data_test %>% dplyr::select(dplyr::matches("pix*"))
# make.names() turns numeric class labels into syntactically valid factor
# level names (prefixing them with "X"), as caret requires for classification.
data_train[["class"]] <- as.factor(make.names(data_train[["class"]]))
y_train <- data_train[["class"]]
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Control using 3-fold cross-validation
fitControl <- trainControl(method="cv",
                           number=3,
                           verboseIter=TRUE)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Hyperparameters tuning
modelFit <- caret::train(y=y_train,
                         x=X_train,
                         method="gamLoess",
                         trControl=fitControl,
                         tuneLength = 10)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Accuracy check on train sample
pred.test <- predict(modelFit,newdata=subset(data_train, select = -c(class)))
# BUG FIX: caret::confusionMatrix() expects data = predicted classes and
# reference = true classes; the arguments were swapped (harmless for the
# overall accuracy extracted below, which is symmetric, but wrong for any
# class-wise statistic). Also renamed `matrix` -> `conf_mat` to stop
# shadowing base::matrix().
conf_mat <- confusionMatrix(pred.test, data_train[["class"]])
# Append a timestamped accuracy record for this model to the shared log file.
cat(c(format(Sys.time(), "%d/%m/%Y %H:%M"),
      modelFit[["method"]],
      round(conf_mat[["overall"]][1], 5),"\n"),
    file="../Log/log.txt",
    sep = "\t",
    append = TRUE)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Predict test sample; strip the "X" prefix that make.names() added above.
pred <- predict(modelFit,newdata=X_test) %>% str_remove("[X]")
write.table(pred,
            paste0("../Pred/pred_",modelFit[["method"]],"_",format(Sys.time(),
            "%y%m%d%H%M"),".csv"),
            col.names=FALSE,
            row.names=FALSE,
            quote = FALSE)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Save model
saveRDS(modelFit,
        paste0("../Model/",modelFit[["method"]],"_",format(Sys.time(),
        "%y%m%d%H%M"),".rds"))
# ------------------------------------------------------------------------------
|
/Caret/Script/Script_gamLoess.R
|
no_license
|
bemayer/DigitRecognizer
|
R
| false
| false
| 2,723
|
r
|
# ------------------------------------------------------------------------------
# Libraries
library(dplyr)
library(readr)
library(stringr)
library(caret)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Read, convert and subset the data
load("../Data/data_train.rda")
load("../Data/data_test.rda")
# NOTE(review): dplyr::matches() treats "pix*" as a regular expression
# ("pi" followed by zero or more "x"), not a glob; "^pix" would be more
# precise -- confirm against the pixel column names.
X_train <- data_train %>% dplyr::select(dplyr::matches("pix*"))
X_test <- data_test %>% dplyr::select(dplyr::matches("pix*"))
# make.names() turns numeric class labels into syntactically valid factor
# level names (prefixing them with "X"), as caret requires for classification.
data_train[["class"]] <- as.factor(make.names(data_train[["class"]]))
y_train <- data_train[["class"]]
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Control using 3-fold cross-validation
fitControl <- trainControl(method="cv",
                           number=3,
                           verboseIter=TRUE)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Hyperparameters tuning
modelFit <- caret::train(y=y_train,
                         x=X_train,
                         method="gamLoess",
                         trControl=fitControl,
                         tuneLength = 10)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Accuracy check on train sample
pred.test <- predict(modelFit,newdata=subset(data_train, select = -c(class)))
# BUG FIX: caret::confusionMatrix() expects data = predicted classes and
# reference = true classes; the arguments were swapped (harmless for the
# overall accuracy extracted below, which is symmetric, but wrong for any
# class-wise statistic). Also renamed `matrix` -> `conf_mat` to stop
# shadowing base::matrix().
conf_mat <- confusionMatrix(pred.test, data_train[["class"]])
# Append a timestamped accuracy record for this model to the shared log file.
cat(c(format(Sys.time(), "%d/%m/%Y %H:%M"),
      modelFit[["method"]],
      round(conf_mat[["overall"]][1], 5),"\n"),
    file="../Log/log.txt",
    sep = "\t",
    append = TRUE)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Predict test sample; strip the "X" prefix that make.names() added above.
pred <- predict(modelFit,newdata=X_test) %>% str_remove("[X]")
write.table(pred,
            paste0("../Pred/pred_",modelFit[["method"]],"_",format(Sys.time(),
            "%y%m%d%H%M"),".csv"),
            col.names=FALSE,
            row.names=FALSE,
            quote = FALSE)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Save model
saveRDS(modelFit,
        paste0("../Model/",modelFit[["method"]],"_",format(Sys.time(),
        "%y%m%d%H%M"),".rds"))
# ------------------------------------------------------------------------------
|
## ------------------------------------------------------------------------
# Purled (knitr-extracted) code from the vembedr package vignette:
# examples of embedding hosted videos into HTML output. Code lines must
# stay in sync with the vignette source, so only comments are added here.
library("vembedr")
library("htmltools")
## ------------------------------------------------------------------------
# Channel 9 talks are addressed by a path, supplied as a character vector.
embed_channel9(
  id = c("Events", "useR-international-R-User-conference", "useR2016", "Forty-years-of-S")
)
## ------------------------------------------------------------------------
# Convenience wrapper for useR! 2016 talks.
embed_user2016(id = "Literate-Programming")
## ------------------------------------------------------------------------
# Embed a YouTube video by its video id.
embed_youtube(id = "q2nNzNo_Xps")
## ------------------------------------------------------------------------
# htmltools::div() can wrap an embed, e.g. to center it on the page.
div(align = "center", embed_youtube(id = "Qpoqzt2EHaA"))
## ------------------------------------------------------------------------
# Embed a Vimeo video by its numeric id.
embed_vimeo(id = "48699174")
## ------------------------------------------------------------------------
# Start playback at 252 seconds via the YouTube query string.
embed_youtube(id = "8SGif63VW6E", query = list(start = 252))
## ----eval=FALSE----------------------------------------------------------
# embed_vimeo(id = "98892825", fragment = "t=60")
## ----eval=FALSE----------------------------------------------------------
# embed_youtube(id = "8SGif63VW6E") %>% use_start_time("4m12s")
## ----eval=FALSE----------------------------------------------------------
# embed_vimeo(id = "98892825") %>% use_start_time("60")
## ------------------------------------------------------------------------
# use_start_time() accepts several time formats, e.g. "21m45s".
embed_user2016(id = "Day-3-Siepr-130-Ligtning-Talks-100-PM-140-PM") %>%
  use_start_time("21m45s")
## ------------------------------------------------------------------------
# Easter-egg helpers shipped with the package.
rickroll_youtube()
## ------------------------------------------------------------------------
rickroll_vimeo()
## ----eval=FALSE----------------------------------------------------------
# embed_youtube() %>% use_rickroll()
|
/docs/articles/embed.R
|
no_license
|
JohnModica/vembedr
|
R
| false
| false
| 1,799
|
r
|
## ------------------------------------------------------------------------
# Purled (knitr-extracted) code from the vembedr package vignette:
# examples of embedding hosted videos into HTML output. Code lines must
# stay in sync with the vignette source, so only comments are added here.
library("vembedr")
library("htmltools")
## ------------------------------------------------------------------------
# Channel 9 talks are addressed by a path, supplied as a character vector.
embed_channel9(
  id = c("Events", "useR-international-R-User-conference", "useR2016", "Forty-years-of-S")
)
## ------------------------------------------------------------------------
# Convenience wrapper for useR! 2016 talks.
embed_user2016(id = "Literate-Programming")
## ------------------------------------------------------------------------
# Embed a YouTube video by its video id.
embed_youtube(id = "q2nNzNo_Xps")
## ------------------------------------------------------------------------
# htmltools::div() can wrap an embed, e.g. to center it on the page.
div(align = "center", embed_youtube(id = "Qpoqzt2EHaA"))
## ------------------------------------------------------------------------
# Embed a Vimeo video by its numeric id.
embed_vimeo(id = "48699174")
## ------------------------------------------------------------------------
# Start playback at 252 seconds via the YouTube query string.
embed_youtube(id = "8SGif63VW6E", query = list(start = 252))
## ----eval=FALSE----------------------------------------------------------
# embed_vimeo(id = "98892825", fragment = "t=60")
## ----eval=FALSE----------------------------------------------------------
# embed_youtube(id = "8SGif63VW6E") %>% use_start_time("4m12s")
## ----eval=FALSE----------------------------------------------------------
# embed_vimeo(id = "98892825") %>% use_start_time("60")
## ------------------------------------------------------------------------
# use_start_time() accepts several time formats, e.g. "21m45s".
embed_user2016(id = "Day-3-Siepr-130-Ligtning-Talks-100-PM-140-PM") %>%
  use_start_time("21m45s")
## ------------------------------------------------------------------------
# Easter-egg helpers shipped with the package.
rickroll_youtube()
## ------------------------------------------------------------------------
rickroll_vimeo()
## ----eval=FALSE----------------------------------------------------------
# embed_youtube() %>% use_rickroll()
|
\name{pipe.DEtools}
\alias{pipe.DESeq}
\alias{pipe.EdgeR}
\alias{pipe.RankProduct}
\alias{pipe.RoundRobin}
\alias{pipe.SAM}
\title{
Pipes for Group-wise Differential Expression Tools like DESeq, EdgeR, SAM, etc.
}
\description{
Wrapper functions to a family of published DE tools, to find significant
differentially expressed genes between groups of samples.
}
\usage{
pipe.DESeq(sampleIDset, speciesID = getCurrentSpecies(), annotationFile = "Annotation.txt",
optionsFile = "Options.txt", useMultiHits = TRUE, results.path = NULL,
groupColumn = "Group", colorColumn = "Color", folderName = "",
altGeneMap = NULL, altGeneMapLabel = NULL, targetID = NULL, Ngenes = 100,
geneColumnHTML = if (speciesID \%in\% MAMMAL_SPECIES) "NAME" else "GENE_ID",
keepIntergenics = FALSE, verbose = !interactive(), label = "",
doDE = TRUE, PLOT.FUN = NULL, ...)
pipe.EdgeR(sampleIDset, speciesID = getCurrentSpecies(), annotationFile = "Annotation.txt",
optionsFile = "Options.txt", useMultiHits = TRUE, results.path = NULL,
groupColumn = "Group", colorColumn = "Color", folderName = "",
altGeneMap = NULL, altGeneMapLabel = NULL, targetID = NULL, Ngenes = 100,
geneColumnHTML = if (speciesID \%in\% MAMMAL_SPECIES) "NAME" else "GENE_ID",
keepIntergenics = FALSE, verbose = !interactive(), label = "",
doDE = TRUE, PLOT.FUN = NULL, ...)
pipe.RankProduct(sampleIDset, speciesID = getCurrentSpecies(), annotationFile = "Annotation.txt",
optionsFile = "Options.txt", useMultiHits = TRUE, results.path = NULL,
groupColumn = "Group", colorColumn = "Color", folderName = "",
altGeneMap = NULL, altGeneMapLabel = NULL, targetID = NULL, Ngenes = 100,
geneColumnHTML = if (speciesID \%in\% MAMMAL_SPECIES) "NAME" else "GENE_ID",
keepIntergenics = FALSE, verbose = !interactive(), label = "",
doDE = TRUE, PLOT.FUN = NULL, ...)
pipe.RoundRobin(sampleIDset, speciesID = getCurrentSpecies(), annotationFile = "Annotation.txt",
optionsFile = "Options.txt", useMultiHits = TRUE, results.path = NULL,
groupColumn = "Group", colorColumn = "Color", folderName = "",
altGeneMap = NULL, altGeneMapLabel = NULL, targetID = NULL, Ngenes = 100,
geneColumnHTML = if (speciesID \%in\% MAMMAL_SPECIES) "NAME" else "GENE_ID",
keepIntergenics = FALSE, verbose = !interactive(), label = "",
doDE = TRUE, PLOT.FUN = NULL, ...)
pipe.SAM(sampleIDset, speciesID = getCurrentSpecies(), annotationFile = "Annotation.txt",
optionsFile = "Options.txt", useMultiHits = TRUE, results.path = NULL,
groupColumn = "Group", colorColumn = "Color", folderName = "",
altGeneMap = NULL, altGeneMapLabel = NULL, targetID = NULL, Ngenes = 100,
geneColumnHTML = if (speciesID \%in\% MAMMAL_SPECIES) "NAME" else "GENE_ID",
keepIntergenics = FALSE, verbose = !interactive(), label = "",
doDE = TRUE, PLOT.FUN = NULL, ...)
}
\arguments{
\item{sampleIDset}{
Character vector of SampleIDs, giving the full set of samples that will take part in the DE calculations.
}
\item{speciesID}{
The SpeciesID for one single species. The DE tools do not operate on multiple species at one time.
}
\item{annotationFile}{
File of sample annotation details, which specifies all needed
sample-specific information about the samples under study.
See \code{\link{DuffyNGS_Annotation}}.
}
\item{optionsFile}{
File of processing options, which specifies all processing
parameters that are not sample specific. See \code{\link{DuffyNGS_Options}}.
}
\item{useMultiHits}{
Logical. By default, all DE tools use the RPKM or READ values from the transcriptomes that correspond
to keeping all aligned reads, including those alignments called "MultiHit" reads. If \code{FALSE}, this
behavior can be restricted to only using uniquely mapped reads. Since the transcriptomes store both
methods of counting gene abundance, changing how the DE results may be impacted is trivial.
}
\item{results.path}{
The top level folder path for writing result files to. By default, read from the Options
file entry 'results.path'.
}
\item{groupColumn}{
Character string specifying one column of the annotation table, to give the group name for each sample.
}
\item{colorColumn}{
Character string specifying one column of the annotation table, to give the group color for each sample.
}
\item{folderName}{
Required character string, with no embedded blanks, used to name the folder of DE results that will be
generated by the DE tool. Typically, use a short but informative name that describes the groups being compared.
}
\item{altGeneMap}{
An alternate data frame of gene annotations, in a format identical to \code{\link{getCurrentGeneMap}},
that has the gene names and locations to be measured for differential expression. By default, use the
standard built-in gene map for this species.
}
\item{altGeneMapLabel}{
A character string identifier for the alternate gene map, that becomes part of all created path and file names
to indicate the gene map that produced the transcriptomes used in this DE analysis.
}
\item{targetID}{
Optional character string giving the target organism(s) being compared. Used by the gene plotting tools,
defaults to the current target.
}
\item{Ngenes}{
Number of genes to show in the HTML results and to create gene plot images for.
}
\item{geneColumnHTML}{
The name of one column in the current gene map, that contains the identifier shown in the HTML results.
Some genomes require complex compound GeneIDs to give genomic location specificity, but are unwieldy for
routine use. This argument lets a second simpler identifier be used as a surrogate GeneID.
}
\item{keepIntergenics}{
Logical. By default, all transcriptomes keep gene expression values for defined intergenic "non-gene"
regions defined in the gene map. These intergenic regions can be included or excluded from the DE
fold change comparisons and results.
}
\item{label}{
A character string that is passed to the gene plot tool, for inclusion in the main plot header.
}
\item{doDE}{
Logical, controls whether the complete DE analysis is performed, or whether to just use results already
present in the DE subfolder. Typically used to just remake gene plot images.
}
\item{PLOT.FUN}{
An alternative function to use for generating gene plot images, that accepts a single GeneID as its
argument. Use \code{NA} to suppress all plotting.
}
\item{\dots}{
Other arguments passed down to the gene plotting function.
}
}
\details{
Even though these 5 DE tools implement different methods of determining differential expression
and take different input arguments, we use a common calling command line to simplify the use of all
5 tools and to standardize how they report their results.
The grouping column from the annotation file determines: how the samples are combined into groups,
the names for all result files, and the number of different groups being compared. When more than 2
groups are being compared, a K-ways comparison is performed such that each one group is compared against
all other groups combined, like a "Us against all other groups who are not us" strategy.
Each comparison creates a family of result files, with suffix names "UP" and "DOWN", to convey the
direction of each comparison. Note that in the case of just 2 groups, the UP and DOWN results are
virtually symmetric, but that is never true for 3+ group comparisons. Each comparison file uses a
composite naming strategy combining \code{<Group>.<Species>.<DEtool>.<DirectionSuffix>}.
}
\value{
A subfolder of result files, with a name constructed from the current species prefix and \code{folderName}.
For each group name, a set of DE result files in various formats:
\item{Ratio.txt}{A tab delimited file of all genes in the species, sorted by fold-change and P-value, that
includes all DE metrics returned by that DE tool.}
\item{UP.html }{}
\item{DOWN.html }{A pair of HTML files of gene expression showing just the top \code{Ngenes} genes
that are most differentially expressed for that comparison group and direction.}
\item{All.GeneData.txt}{A tab delimited matrix file of all genes in the species, giving the expression
values used by that DE tool (RPKM for some, READ counts for DESeq & EdgeR).}
\item{Cluster & PCA}{A set of .PNG plots that visually summarize the similarity of the transcriptomes.
The Round Robin DE tool augments the clustering with "group average" transcriptomes as well.}
}
\author{
Bob Morrison
}
\seealso{
\code{\link{pipe.DiffExpression}} {for turning a set of transcriptomes into ratio files needed for RoundRobin}
\code{\link{pipe.MetaResults}} {for dispatching all DE tools and merging their results.}
}
\references{
\preformatted{
DESeq: Anders, Genome Biology (2010)
EdgeR: Robinson, Biostatistics (2008)
RankProduct: Breitling, FEBS Letters (2004)
RoundRobin: Morrison (unpublished)
SAM: Tusher, PNAS (2001)
}
}
|
/man/pipe.DEtools.Rd
|
no_license
|
robertdouglasmorrison/DuffyNGS
|
R
| false
| false
| 8,892
|
rd
|
\name{pipe.DEtools}
\alias{pipe.DESeq}
\alias{pipe.EdgeR}
\alias{pipe.RankProduct}
\alias{pipe.RoundRobin}
\alias{pipe.SAM}
\title{
Pipes for Group-wise Differential Expression Tools like DESeq, EdgeR, SAM, etc.
}
\description{
Wrapper functions to a family of published DE tools, to find significant
differentially expressed genes between groups of samples.
}
\usage{
pipe.DESeq(sampleIDset, speciesID = getCurrentSpecies(), annotationFile = "Annotation.txt",
optionsFile = "Options.txt", useMultiHits = TRUE, results.path = NULL,
groupColumn = "Group", colorColumn = "Color", folderName = "",
altGeneMap = NULL, altGeneMapLabel = NULL, targetID = NULL, Ngenes = 100,
geneColumnHTML = if (speciesID \%in\% MAMMAL_SPECIES) "NAME" else "GENE_ID",
keepIntergenics = FALSE, verbose = !interactive(), label = "",
doDE = TRUE, PLOT.FUN = NULL, ...)
pipe.EdgeR(sampleIDset, speciesID = getCurrentSpecies(), annotationFile = "Annotation.txt",
optionsFile = "Options.txt", useMultiHits = TRUE, results.path = NULL,
groupColumn = "Group", colorColumn = "Color", folderName = "",
altGeneMap = NULL, altGeneMapLabel = NULL, targetID = NULL, Ngenes = 100,
geneColumnHTML = if (speciesID \%in\% MAMMAL_SPECIES) "NAME" else "GENE_ID",
keepIntergenics = FALSE, verbose = !interactive(), label = "",
doDE = TRUE, PLOT.FUN = NULL, ...)
pipe.RankProduct(sampleIDset, speciesID = getCurrentSpecies(), annotationFile = "Annotation.txt",
optionsFile = "Options.txt", useMultiHits = TRUE, results.path = NULL,
groupColumn = "Group", colorColumn = "Color", folderName = "",
altGeneMap = NULL, altGeneMapLabel = NULL, targetID = NULL, Ngenes = 100,
geneColumnHTML = if (speciesID \%in\% MAMMAL_SPECIES) "NAME" else "GENE_ID",
keepIntergenics = FALSE, verbose = !interactive(), label = "",
doDE = TRUE, PLOT.FUN = NULL, ...)
pipe.RoundRobin(sampleIDset, speciesID = getCurrentSpecies(), annotationFile = "Annotation.txt",
optionsFile = "Options.txt", useMultiHits = TRUE, results.path = NULL,
groupColumn = "Group", colorColumn = "Color", folderName = "",
altGeneMap = NULL, altGeneMapLabel = NULL, targetID = NULL, Ngenes = 100,
geneColumnHTML = if (speciesID \%in\% MAMMAL_SPECIES) "NAME" else "GENE_ID",
keepIntergenics = FALSE, verbose = !interactive(), label = "",
doDE = TRUE, PLOT.FUN = NULL, ...)
pipe.SAM(sampleIDset, speciesID = getCurrentSpecies(), annotationFile = "Annotation.txt",
optionsFile = "Options.txt", useMultiHits = TRUE, results.path = NULL,
groupColumn = "Group", colorColumn = "Color", folderName = "",
altGeneMap = NULL, altGeneMapLabel = NULL, targetID = NULL, Ngenes = 100,
geneColumnHTML = if (speciesID \%in\% MAMMAL_SPECIES) "NAME" else "GENE_ID",
keepIntergenics = FALSE, verbose = !interactive(), label = "",
doDE = TRUE, PLOT.FUN = NULL, ...)
}
\arguments{
\item{sampleIDset}{
Character vector of SampleIDs, giving the full set of samples that will take part in the DE calculations.
}
\item{speciesID}{
The SpeciesID for one single species. The DE tools do not operate on multiple species at one time.
}
\item{annotationFile}{
File of sample annotation details, which specifies all needed
sample-specific information about the samples under study.
See \code{\link{DuffyNGS_Annotation}}.
}
\item{optionsFile}{
File of processing options, which specifies all processing
parameters that are not sample specific. See \code{\link{DuffyNGS_Options}}.
}
\item{useMultiHits}{
Logical. By default, all DE tools use the RPKM or READ values from the transcriptomes that correspond
to keeping all aligned reads, including those alignments called "MultiHit" reads. If \code{FALSE}, this
behavior can be restricted to only using uniquely mapped reads. Since the transcriptomes store both
methods of counting gene abundance, changing how the DE results may be impacted is trivial.
}
\item{results.path}{
The top level folder path for writing result files to. By default, read from the Options
file entry 'results.path'.
}
\item{groupColumn}{
Character string specifying one column of the annotation table, to give the group name for each sample.
}
\item{colorColumn}{
Character string specifying one column of the annotation table, to give the group color for each sample.
}
\item{folderName}{
Required character string, with no embedded blanks, used to name the folder of DE results that will be
generated by the DE tool. Typically, use a short but informative name that describes the groups being compared.
}
\item{altGeneMap}{
An alternate data frame of gene annotations, in a format identical to \code{\link{getCurrentGeneMap}},
that has the gene names and locations to be measured for differential expression. By default, use the
standard built-in gene map for this species.
}
\item{altGeneMapLabel}{
A character string identifier for the alternate gene map, that becomes part of all created path and file names
to indicate the gene map that produced the transcriptomes used in this DE analysis.
}
\item{targetID}{
Optional character string giving the target organism(s) being compared. Used by the gene plotting tools,
defaults to the current target.
}
\item{Ngenes}{
Number of genes to show in the HTML results and to create gene plot images for.
}
\item{geneColumnHTML}{
The name of one column in the current gene map, that contains the identifier shown in the HTML results.
Some genomes require complex compound GeneIDs to give genomic location specificity, but are unwieldy for
routine use. This argument lets a second simpler identifier be used as a surrogate GeneID.
}
\item{keepIntergenics}{
Logical. By default, all transcriptomes keep gene expression values for defined intergenic "non-gene"
regions defined in the gene map. These intergenic regions can be included or excluded from the DE
fold change comparisons and results.
}
\item{label}{
A character string that is passed to the gene plot tool, for inclusion in the main plot header.
}
\item{doDE}{
Logical, controls whether the complete DE analysis is performed, or whether to just use results already
present in the DE subfolder. Typically used to just remake gene plot images.
}
\item{PLOT.FUN}{
An alternative function to use for generating gene plot images, that accepts a single GeneID as its
argument. Use \code{NA} to suppress all plotting.
}
\item{\dots}{
Other arguments passed down to the gene plotting function.
}
}
\details{
Even though these 5 DE tools implement different methods of determining differential expression
and take different input arguments, we use a common calling command line to simplify the use of all
5 tools and to standardize how they report their results.
The grouping column from the annotation file determines: how the samples are combined into groups,
the names for all result files, and the number of different groups being compared. When more than 2
groups are being compared, a K-ways comparison is performed such that each one group is compared against
all other groups combined, like a "Us against all other groups who are not us" strategy.
Each comparison creates a family of result files, with suffix names "UP" and "DOWN", to convey the
direction of each comparison. Note that in the case of just 2 groups, the UP and DOWN results are
virtually symmetric, but that is never true for 3+ group comparisons. Each comparison file uses a
composite naming strategy combining \code{<Group>.<Species>.<DEtool>.<DirectionSuffix>}.
}
\value{
A subfolder of result files, with a name constructed from the current species prefix and \code{folderName}.
For each group name, a set of DE result files in various formats:
\item{Ratio.txt}{A tab delimited file of all genes in the species, sorted by fold-change and P-value, that
includes all DE metrics returned by that DE tool.}
\item{UP.html }{}
\item{DOWN.html }{A pair of HTML files of gene expression showing just the top \code{Ngenes} genes
that are most differentially expressed for that comparison group and direction.}
\item{All.GeneData.txt}{A tab delimited matrix file of all genes in the species, giving the expression
values used by that DE tool (RPKM for some, READ counts for DESeq & EdgeR).}
\item{Cluster & PCA}{A set of .PNG plots that visually summarize the similarity of the transcriptomes.
The Round Robin DE tool augments the clustering with "group average" transcriptomes as well.}
}
\author{
Bob Morrison
}
\seealso{
\code{\link{pipe.DiffExpression}} {for turning a set of transcriptomes into ratio files needed for RoundRobin}
\code{\link{pipe.MetaResults}} {for dispatching all DE tools and merging their results.}
}
\references{
\preformatted{
DESeq: Anders, Genome Biology (2010)
EdgeR: Robinson, Biostatistics (2008)
RankProduct: Breitling, FEBS Letters (2004)
RoundRobin: Morrison (unpublished)
SAM: Tusher, PNAS (2001)
}
}
|
## File Name: sirt_import_coda_effectiveSize.R
## File Version: 0.01

#-- Thin wrapper around coda::effectiveSize(). First verifies (via TAM's
#-- helper, which emits an informative message if the optional 'coda'
#-- package is missing), then forwards all arguments unchanged and returns
#-- the result.
sirt_import_coda_effectiveSize <- function(...) {
    TAM::require_namespace_msg("coda")
    coda::effectiveSize(...)
}
|
/sirt/R/sirt_import_coda_effectiveSize.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| false
| 224
|
r
|
## File Name: sirt_import_coda_effectiveSize.R
## File Version: 0.01

#-- Thin wrapper around coda::effectiveSize(). First verifies (via TAM's
#-- helper, which emits an informative message if the optional 'coda'
#-- package is missing), then forwards all arguments unchanged and returns
#-- the result.
sirt_import_coda_effectiveSize <- function(...) {
    TAM::require_namespace_msg("coda")
    coda::effectiveSize(...)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coloredmesh.R
\name{coloredmeshes.from.color}
\alias{coloredmeshes.from.color}
\title{Create coloredmeshes for both hemis using pre-defined colors.}
\usage{
coloredmeshes.from.color(
subjects_dir,
subject_id,
color_data,
hemi,
surface = "white",
metadata = list()
)
}
\arguments{
\item{subjects_dir}{string. The FreeSurfer SUBJECTS_DIR, i.e., a directory containing the data for all your subjects, each in a subdir named after the subject identifier.}
\item{subject_id}{string. The subject identifier.}
\item{color_data}{a hemilist containing vectors of hex color strings}
\item{hemi}{string, one of 'lh' or 'rh'. The hemisphere name. Used to construct the names of the label data files to be loaded.}
\item{surface}{character string or `fs.surface` instance. The display surface. E.g., "white", "pial", or "inflated". Defaults to "white".}
\item{metadata}{a named list, can contain whatever you want. Typical entries are: 'src_data' a hemilist containing the source data from which the 'color_data' was created, optional. If available, it is encoded into the coloredmesh and can be used later to plot a colorbar. 'makecmap_options': the options used to create the colormap from the data.}
}
\value{
named list of coloredmeshes. Each entry is a named list with entries: "mesh" the \code{\link{tmesh3d}} mesh object. "col": the mesh colors. "render", logical, whether to render the mesh. "hemi": the hemisphere, one of 'lh' or 'rh'.
}
\description{
Create coloredmeshes for both hemis using pre-defined colors.
}
\seealso{
Other coloredmesh functions:
\code{\link{coloredmesh.from.annot}()},
\code{\link{coloredmesh.from.label}()},
\code{\link{coloredmesh.from.mask}()},
\code{\link{coloredmesh.from.morph.native}()},
\code{\link{coloredmesh.from.morph.standard}()},
\code{\link{coloredmesh.from.morphdata}()}
}
\concept{coloredmesh functions}
|
/man/coloredmeshes.from.color.Rd
|
permissive
|
dfsp-spirit/fsbrain
|
R
| false
| true
| 1,940
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coloredmesh.R
\name{coloredmeshes.from.color}
\alias{coloredmeshes.from.color}
\title{Create coloredmeshes for both hemis using pre-defined colors.}
\usage{
coloredmeshes.from.color(
subjects_dir,
subject_id,
color_data,
hemi,
surface = "white",
metadata = list()
)
}
\arguments{
\item{subjects_dir}{string. The FreeSurfer SUBJECTS_DIR, i.e., a directory containing the data for all your subjects, each in a subdir named after the subject identifier.}
\item{subject_id}{string. The subject identifier.}
\item{color_data}{a hemilist containing vectors of hex color strings}
\item{hemi}{string, one of 'lh' or 'rh'. The hemisphere name. Used to construct the names of the label data files to be loaded.}
\item{surface}{character string or `fs.surface` instance. The display surface. E.g., "white", "pial", or "inflated". Defaults to "white".}
\item{metadata}{a named list, can contain whatever you want. Typical entries are: 'src_data' a hemilist containing the source data from which the 'color_data' was created, optional. If available, it is encoded into the coloredmesh and can be used later to plot a colorbar. 'makecmap_options': the options used to create the colormap from the data.}
}
\value{
named list of coloredmeshes. Each entry is a named list with entries: "mesh" the \code{\link{tmesh3d}} mesh object. "col": the mesh colors. "render", logical, whether to render the mesh. "hemi": the hemisphere, one of 'lh' or 'rh'.
}
\description{
Create coloredmeshes for both hemis using pre-defined colors.
}
\seealso{
Other coloredmesh functions:
\code{\link{coloredmesh.from.annot}()},
\code{\link{coloredmesh.from.label}()},
\code{\link{coloredmesh.from.mask}()},
\code{\link{coloredmesh.from.morph.native}()},
\code{\link{coloredmesh.from.morph.standard}()},
\code{\link{coloredmesh.from.morphdata}()}
}
\concept{coloredmesh functions}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setupX.R
\name{setupX}
\alias{setupX}
\title{Set up design matrix X by reading data from big data file}
\usage{
setupX(
filename,
dir = getwd(),
sep = ",",
backingfile = paste0(unlist(strsplit(filename, split = "\\\\."))[1], ".bin"),
descriptorfile = paste0(unlist(strsplit(filename, split = "\\\\."))[1], ".desc"),
type = "double",
...
)
}
\arguments{
\item{filename}{The name of the data file. For example, "dat.txt".}
\item{dir}{The directory used to store the binary and descriptor files
associated with the \code{big.matrix}. The default is current working
directory.}
\item{sep}{The field separator character. For example, "," for
comma-delimited files (the default); "\\t" for tab-delimited files.}
\item{backingfile}{The binary file associated with the file-backed
\code{big.matrix}. By default, its name is the same as \code{filename} with
the extension replaced by ".bin".}
\item{descriptorfile}{The descriptor file used for the description of the
file-backed \code{big.matrix}. By default, its name is the same as
\code{filename} with the extension replaced by ".desc".}
\item{type}{The data type. Only "double" is supported for now.}
\item{...}{Additional arguments that can be passed into function
\code{\link[bigmemory]{read.big.matrix}}.}
}
\value{
A \code{big.matrix} object corresponding to a file-backed
\code{big.matrix}. It's ready to be used as the design matrix \code{X} in
\code{\link{biglasso}} and \code{\link{cv.biglasso}}.
}
\description{
Set up the design matrix X as a \code{big.matrix} object based on external
massive data file stored on disk that cannot be fully loaded into memory.
The data file must be a well-formatted ASCII-file, and contains only one
single type. Current version only supports \code{double} type. Other
restrictions about the data file are described in
\code{\link{biglasso-package}}. This function reads the massive data, and
creates a \code{big.matrix} object. By default, the resulting
\code{big.matrix} is file-backed, and can be shared across processors or
nodes of a cluster.
}
\details{
For a data set, this function needs to be called only one time to set up the
\code{big.matrix} object with two backing files (.bin, .desc) created in
current working directory. Once set up, the data can be "loaded" into any
(new) R session by calling \code{attach.big.matrix(descriptorfile)}.
This function is a simple wrapper of
\code{\link[bigmemory]{read.big.matrix}}. See
\code{\link[bigmemory]{read.big.matrix}} and the package
\href{https://CRAN.R-project.org/package=bigmemory}{bigmemory} for more
details.
}
\examples{
## see the example in "biglasso-package"
}
\seealso{
\code{\link{biglasso}}, \code{\link{cv.ncvreg}}
}
\author{
Yaohui Zeng and Patrick Breheny
Maintainer: Yaohui Zeng <yaohui.zeng@gmail.com>
}
|
/man/setupX.Rd
|
no_license
|
YaohuiZeng/biglasso
|
R
| false
| true
| 2,949
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setupX.R
\name{setupX}
\alias{setupX}
\title{Set up design matrix X by reading data from big data file}
\usage{
setupX(
filename,
dir = getwd(),
sep = ",",
backingfile = paste0(unlist(strsplit(filename, split = "\\\\."))[1], ".bin"),
descriptorfile = paste0(unlist(strsplit(filename, split = "\\\\."))[1], ".desc"),
type = "double",
...
)
}
\arguments{
\item{filename}{The name of the data file. For example, "dat.txt".}
\item{dir}{The directory used to store the binary and descriptor files
associated with the \code{big.matrix}. The default is current working
directory.}
\item{sep}{The field separator character. For example, "," for
comma-delimited files (the default); "\\t" for tab-delimited files.}
\item{backingfile}{The binary file associated with the file-backed
\code{big.matrix}. By default, its name is the same as \code{filename} with
the extension replaced by ".bin".}
\item{descriptorfile}{The descriptor file used for the description of the
file-backed \code{big.matrix}. By default, its name is the same as
\code{filename} with the extension replaced by ".desc".}
\item{type}{The data type. Only "double" is supported for now.}
\item{...}{Additional arguments that can be passed into function
\code{\link[bigmemory]{read.big.matrix}}.}
}
\value{
A \code{big.matrix} object corresponding to a file-backed
\code{big.matrix}. It's ready to be used as the design matrix \code{X} in
\code{\link{biglasso}} and \code{\link{cv.biglasso}}.
}
\description{
Set up the design matrix X as a \code{big.matrix} object based on external
massive data file stored on disk that cannot be fully loaded into memory.
The data file must be a well-formatted ASCII-file, and contains only one
single type. Current version only supports \code{double} type. Other
restrictions about the data file are described in
\code{\link{biglasso-package}}. This function reads the massive data, and
creates a \code{big.matrix} object. By default, the resulting
\code{big.matrix} is file-backed, and can be shared across processors or
nodes of a cluster.
}
\details{
For a data set, this function needs to be called only one time to set up the
\code{big.matrix} object with two backing files (.bin, .desc) created in
current working directory. Once set up, the data can be "loaded" into any
(new) R session by calling \code{attach.big.matrix(descriptorfile)}.
This function is a simple wrapper of
\code{\link[bigmemory]{read.big.matrix}}. See
\code{\link[bigmemory]{read.big.matrix}} and the package
\href{https://CRAN.R-project.org/package=bigmemory}{bigmemory} for more
details.
}
\examples{
## see the example in "biglasso-package"
}
\seealso{
\code{\link{biglasso}}, \code{\link{cv.ncvreg}}
}
\author{
Yaohui Zeng and Patrick Breheny
Maintainer: Yaohui Zeng <yaohui.zeng@gmail.com>
}
|
# Replay of a fuzzer-generated (AFL/valgrind) input against the internal
# C++ entry point IntervalSurgeon:::rcpp_pile().  The argument values are
# the exact bytes the fuzzer produced and must not be altered.
fuzz_ends <- c(-1125300777L, 765849512L, -1760774663L, 791623263L,
               1358782356L, -128659642L, -14914341L, 1092032927L,
               1837701012L, 1632068659L)
fuzz_pts <- c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
              0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
              0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L)
fuzz_starts <- c(16777216L, 0L, 738263040L, 682962941L, 1612840977L,
                 150997320L, 747898999L, -1195392662L, 2024571419L,
                 808515032L, 1373469055L, -282236997L, -207881465L,
                 -237801926L, -168118689L, -1090227888L, 235129118L,
                 949454105L, 1651285440L, -1119277667L, -1328604796L)
testlist <- list(
  ends = fuzz_ends,
  pts = fuzz_pts,
  starts = fuzz_starts,
  members = NULL,
  total_members = 0L
)
# Call the internal function with the crafted arguments and show the
# structure of the result (or crash, which is what the harness checks for).
result <- do.call(IntervalSurgeon:::rcpp_pile, testlist)
str(result)
|
/IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609873765-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 729
|
r
|
# Replay of a fuzzer-generated (AFL/valgrind) input against the internal
# C++ entry point IntervalSurgeon:::rcpp_pile().  The argument values are
# the exact bytes the fuzzer produced and must not be altered.
fuzz_ends <- c(-1125300777L, 765849512L, -1760774663L, 791623263L,
               1358782356L, -128659642L, -14914341L, 1092032927L,
               1837701012L, 1632068659L)
fuzz_pts <- c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
              0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
              0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L)
fuzz_starts <- c(16777216L, 0L, 738263040L, 682962941L, 1612840977L,
                 150997320L, 747898999L, -1195392662L, 2024571419L,
                 808515032L, 1373469055L, -282236997L, -207881465L,
                 -237801926L, -168118689L, -1090227888L, 235129118L,
                 949454105L, 1651285440L, -1119277667L, -1328604796L)
testlist <- list(
  ends = fuzz_ends,
  pts = fuzz_pts,
  starts = fuzz_starts,
  members = NULL,
  total_members = 0L
)
# Call the internal function with the crafted arguments and show the
# structure of the result (or crash, which is what the harness checks for).
result <- do.call(IntervalSurgeon:::rcpp_pile, testlist)
str(result)
|
# Development helper script: reload the package source, regenerate the
# roxygen documentation, and rebuild the vignettes.
#
# library() instead of require(): require() only returns FALSE when a
# package is missing, so the script would stumble on later with cryptic
# errors; library() fails loudly and immediately.
library(devtools)
library(testthat)
library(R.rsp)
# Reset any custom error handler (e.g. a leftover options(error = recover)).
options(error = NULL)
load_all()
roxygen2::roxygenize()
build_vignettes()
|
/development.R
|
no_license
|
MirkoTh/mtR
|
R
| false
| false
| 130
|
r
|
# Development helper script: reload the package source, regenerate the
# roxygen documentation, and rebuild the vignettes.
#
# library() instead of require(): require() only returns FALSE when a
# package is missing, so the script would stumble on later with cryptic
# errors; library() fails loudly and immediately.
library(devtools)
library(testthat)
library(R.rsp)
# Reset any custom error handler (e.g. a leftover options(error = recover)).
options(error = NULL)
load_all()
roxygen2::roxygenize()
build_vignettes()
|
############################################################################################
### SETTINGS
############################################################################################
# Animated GIF of the Swiss population age distribution over time (swissinfo.ch).
# helpers.R is assumed to provide swi_9palette, swi_logo, the default `font`,
# and to attach ggplot2 / reshape / grid -- TODO confirm against helpers.R.
source("~/swissinfo/_helpers/helpers.R")
library(animation)
# When TRUE, also print a few old-age dependency ratios to the console (see below).
displayStatistics <- F
# Input data: one row per age group, one column per census year.
data.file <- 'contenate_allData.csv'
# Translations: one column per language; rows hold title, description, axis labels, font.
text.file <- 'trad.csv'
############################################################################################
### HELPERS
############################################################################################
# A stripped-down ggplot theme (evaluated once, not a function): based on theme_bw,
# it removes the background, gridlines and chart border.
ggthemeNoFont <- {
#based theme_bw, eliminates background, gridlines, and chart border
theme_bw() + theme(plot.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.border = element_blank(), panel.background = element_blank(), axis.ticks = element_line(size = 0.2),
plot.title = element_text(hjust = 0),panel.grid.major = element_line(colour = "#efe9e0")
)
}
############################################################################################
### Load translation data
############################################################################################
text <- read.csv(text.file, stringsAsFactors = F)
############################################################################################
### Load all data in one data.frame
############################################################################################
data <- read.csv(data.file, stringsAsFactors = F)
# Year columns are read in as "X1860", "X1864", ...; strip the leading "X".
colnames(data)[2:ncol(data)] <- gsub("^X", "", colnames(data)[2:ncol(data)])
# Wide -> long: one row per (Age, year) pair.  melt() presumably comes from
# reshape via helpers.R -- TODO confirm.
data <- data.frame(Age = data[,1], melt(data[,2:ncol(data)]))
colnames(data) <- c('Age', 'Annee', "value")
data$Annee <- as.numeric(as.character(data$Annee))
# keep a numeric copy of Age in datan for the statistics computation below
datan <- data
# Order the Age factor numerically, treating a trailing "+" (e.g. "100+") as its number.
data$Age <- reorder(data$Age, as.numeric(gsub("\\+$", "", as.character(data$Age))))
datan$Age <- as.numeric(gsub("\\+$", "", as.character(datan$Age)))
# compute, for each year, the proportion of the population in each age group
# library(plyr)
# prop <- plyr::ddply(data, .(Annee), summarize,
# Age = Age,
# value = value,
# prop = value / sum(value) * 100)
prop <- do.call(rbind, by(data, data$Annee, function(d) {
data.frame(Age = d$Age, Annee = d$Annee[1], value = d$value, prop = d$value / sum(d$value) * 100)
}))
rownames(prop) <- NULL
# x-axis labels: show every 10th age group plus the last one, leave the rest blank.
xlabel <- rep('', nlevels(data$Age))
idx.x <- c(seq(min(as.numeric(data$Age)), max(as.numeric(data$Age)), 10), nlevels(data$Age))
xlabel[idx.x]<- levels(data$Age)[idx.x]
# load logo as a raster grob for annotation_custom()
g <- rasterGrob(swi_logo, interpolate=TRUE)
### Get some key numbers
# Old-age dependency ratios (65+ relative to 20-64) for selected years;
# results are only printed, never stored.
if(displayStatistics) {
sum(dplyr::filter(datan, Annee == 1860, Age > 65)$value) / sum(dplyr::filter(datan, Annee == 1860, Age >= 20, Age <= 64)$value)
sum(dplyr::filter(datan, Annee == 1901, Age > 65)$value) / sum(dplyr::filter(datan, Annee == 1901, Age >= 20, Age <= 64)$value)
sum(dplyr::filter(datan, Annee == 2012, Age > 65)$value) / sum(dplyr::filter(datan, Annee == 2012, Age >= 20, Age <= 64)$value)
}
# Draw the age-distribution bar chart for a single year `a` on the current
# graphics device (grid.newpage + grid.draw side effects, no return value).
#
# Args:
#   data    NOTE(review): never used inside the body -- the function reads the
#           global `prop` data frame instead.  Looks like a latent bug; confirm
#           before "fixing", since `data` has no `prop` column.
#   a       the year to plot (matched against prop$Annee)
#   title   chart title (drawn top-left)
#   descr   description text drawn under the title
#   xlab    x-axis label
#   ylab    y-axis label
#   family  font family; defaults to the global `font` from helpers.R -- TODO confirm
plotayear2 <- function(data, a, title = "", descr = "", xlab = 'Age', ylab = "%", family = font) {
dd <- prop[prop$Annee == a,]
ghist <- ggplot(data = dd) + geom_bar(aes(Age, prop), size =0.01, stat = "identity",
color = swi_9palette[4], fill = swi_9palette[5]) + ggthemeNoFont + scale_x_discrete(xlab, xlabel) +
scale_y_continuous(name = ylab, limits = c(0, max(prop$prop)), expand = c(0.005,0.005)) +
# the year in big
geom_text(data = data.frame(x = levels(prop$Age)[nlevels(prop$Age)-5], y = max(prop$prop)-0.67, label = as.character(a)),
aes(label = label, x = x, y = y), family = family, alpha = 0.6, size = 60, color = swi_9palette[9], hjust = 1) +
# the title
geom_text(data = data.frame(x = levels(prop$Age)[1],
y = max(prop$prop)-0.05, label = title), aes(label = label, x = x, y = y), family = family, alpha = 1, size = 9, hjust = 0, vjust = 0,
fontface ="bold") +
# the description
geom_text(data = data.frame(x = levels(prop$Age)[1],
y = max(prop$prop)-0.157, label = descr), aes(label = label, x = x, y = y), family = family, alpha = 0.8, size = 6, hjust = 0,vjust =0) +
# theme
theme(axis.text = element_text(size = rel(1), lineheight = 0), plot.margin = unit(c(0.8,1,1.1,0), "lines"),
axis.title = element_text(size = rel(1.5)), text = element_text(family = family))
# Logo is placed below the x-axis (negative y), outside the panel area.
ghista <- ghist + annotation_custom(grob = g, xmin = nlevels(prop$Age)-nlevels(prop$Age)/8, xmax = nlevels(prop$Age),
ymin = -0.15, ymax = -0.22)
gt <- ggplot_gtable(ggplot_build(ghista))
# Disable panel clipping so the logo drawn outside the panel stays visible.
gt$layout$clip[gt$layout$name=="panel"] <- "off"
grid.newpage()
grid.draw(gt)
}
# Pick an arbitrary sample year and run a few one-off rendering checks.
a <- unique(prop$Annee)[10]
# All seven arguments of plotayear2() passed positionally from translation column 1.
plotayear2(data, a, text[1,1], text[2,1], text[3,1], text[4,1], text[5,1])
# some tests for exotic characters (non-Latin scripts, system default font)
plotayear2(data, a, iconv(text[1,4]), text[2,4], text[3,4], family = "")
i <- 5
plotayear2(data, a, iconv(text[1,i]), text[2,i], text[3,i], family = "")
plotayear2(data, a, iconv(text[1,i]), text[2,i], text[3,i] )
# take only every 4th year to keep the animation short
data.sub <- data[data$Annee %% 4 == 0,]
# Render one animated GIF per language column of the translation table,
# written to the working directory as populationAge_<lang>.gif.
for(i in 1:ncol(text)) {
fontToBeUsed <- text[5,i]
cat("\n", colnames(text)[i], "\t with font:", fontToBeUsed, "\n")
saveGIF({
# One frame per selected year, plus a final 2012 frame.
for(a in c(unique(data.sub$Annee), 2012)) {
plotayear2(data.sub, a, title = text[1,i], descr = text[2,i], xlab = text[3,i], ylab = text[4,i], family = fontToBeUsed)
}
}, movie.name = paste("populationAge_", colnames(text)[i], ".gif", sep =""), interval = 0.35, nmax = 50, ani.width = 640*1.1,
ani.height = 640*1.1, loop = TRUE, outdir = getwd())
}
# i <- 1
# plotayear2(data, a, iconv(text[1,i]), text[2,i], text[3,i], family = "Hei")
#
#
# i <- 4
# plotayear2(data, a, iconv(text[1,i]), text[2,i], text[3,i], family = "Hei")
# i <- 5
# plotayear2(data, a, iconv(text[1,i]), text[2,i], text[3,i], family = "Osaka")
#
#
# #Sys.setlocale(locale =c ('zh_CN.UTF-8'))
# x <- read.csv(textConnection("
# 名称,类,学生
# 木材,2,2
# 表,3,4
# 笔,4,2
# 垃圾桶,5,6
# 杯,6,3"), header = TRUE)
# rownames(x) <- x[,1]
# x <- x[,-1]
# barplot(t(x), horiz = TRUE, beside = TRUE, legend.text = TRUE)
# barplot(t(x), horiz = TRUE, beside = TRUE, legend.text = TRUE, family = "Hei")
|
/prod/vieillissement/01_ageDistribution_prod.R
|
no_license
|
d-qn/2014_07_03_evolutionDeLaPopulationSuisse
|
R
| false
| false
| 6,148
|
r
|
############################################################################################
### SETTINGS
############################################################################################
# Animated GIF of the Swiss population age distribution over time (swissinfo.ch).
# helpers.R is assumed to provide swi_9palette, swi_logo, the default `font`,
# and to attach ggplot2 / reshape / grid -- TODO confirm against helpers.R.
source("~/swissinfo/_helpers/helpers.R")
library(animation)
# When TRUE, also print a few old-age dependency ratios to the console (see below).
displayStatistics <- F
# Input data: one row per age group, one column per census year.
data.file <- 'contenate_allData.csv'
# Translations: one column per language; rows hold title, description, axis labels, font.
text.file <- 'trad.csv'
############################################################################################
### HELPERS
############################################################################################
# A stripped-down ggplot theme (evaluated once, not a function): based on theme_bw,
# it removes the background, gridlines and chart border.
ggthemeNoFont <- {
#based theme_bw, eliminates background, gridlines, and chart border
theme_bw() + theme(plot.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.border = element_blank(), panel.background = element_blank(), axis.ticks = element_line(size = 0.2),
plot.title = element_text(hjust = 0),panel.grid.major = element_line(colour = "#efe9e0")
)
}
############################################################################################
### Load translation data
############################################################################################
text <- read.csv(text.file, stringsAsFactors = F)
############################################################################################
### Load all data in one data.frame
############################################################################################
data <- read.csv(data.file, stringsAsFactors = F)
# Year columns are read in as "X1860", "X1864", ...; strip the leading "X".
colnames(data)[2:ncol(data)] <- gsub("^X", "", colnames(data)[2:ncol(data)])
# Wide -> long: one row per (Age, year) pair.  melt() presumably comes from
# reshape via helpers.R -- TODO confirm.
data <- data.frame(Age = data[,1], melt(data[,2:ncol(data)]))
colnames(data) <- c('Age', 'Annee', "value")
data$Annee <- as.numeric(as.character(data$Annee))
# keep a numeric copy of Age in datan for the statistics computation below
datan <- data
# Order the Age factor numerically, treating a trailing "+" (e.g. "100+") as its number.
data$Age <- reorder(data$Age, as.numeric(gsub("\\+$", "", as.character(data$Age))))
datan$Age <- as.numeric(gsub("\\+$", "", as.character(datan$Age)))
# compute, for each year, the proportion of the population in each age group
# library(plyr)
# prop <- plyr::ddply(data, .(Annee), summarize,
# Age = Age,
# value = value,
# prop = value / sum(value) * 100)
prop <- do.call(rbind, by(data, data$Annee, function(d) {
data.frame(Age = d$Age, Annee = d$Annee[1], value = d$value, prop = d$value / sum(d$value) * 100)
}))
rownames(prop) <- NULL
# x-axis labels: show every 10th age group plus the last one, leave the rest blank.
xlabel <- rep('', nlevels(data$Age))
idx.x <- c(seq(min(as.numeric(data$Age)), max(as.numeric(data$Age)), 10), nlevels(data$Age))
xlabel[idx.x]<- levels(data$Age)[idx.x]
# load logo as a raster grob for annotation_custom()
g <- rasterGrob(swi_logo, interpolate=TRUE)
### Get some key numbers
# Old-age dependency ratios (65+ relative to 20-64) for selected years;
# results are only printed, never stored.
if(displayStatistics) {
sum(dplyr::filter(datan, Annee == 1860, Age > 65)$value) / sum(dplyr::filter(datan, Annee == 1860, Age >= 20, Age <= 64)$value)
sum(dplyr::filter(datan, Annee == 1901, Age > 65)$value) / sum(dplyr::filter(datan, Annee == 1901, Age >= 20, Age <= 64)$value)
sum(dplyr::filter(datan, Annee == 2012, Age > 65)$value) / sum(dplyr::filter(datan, Annee == 2012, Age >= 20, Age <= 64)$value)
}
# Draw the age-distribution bar chart for a single year `a` on the current
# graphics device (grid.newpage + grid.draw side effects, no return value).
#
# Args:
#   data    NOTE(review): never used inside the body -- the function reads the
#           global `prop` data frame instead.  Looks like a latent bug; confirm
#           before "fixing", since `data` has no `prop` column.
#   a       the year to plot (matched against prop$Annee)
#   title   chart title (drawn top-left)
#   descr   description text drawn under the title
#   xlab    x-axis label
#   ylab    y-axis label
#   family  font family; defaults to the global `font` from helpers.R -- TODO confirm
plotayear2 <- function(data, a, title = "", descr = "", xlab = 'Age', ylab = "%", family = font) {
dd <- prop[prop$Annee == a,]
ghist <- ggplot(data = dd) + geom_bar(aes(Age, prop), size =0.01, stat = "identity",
color = swi_9palette[4], fill = swi_9palette[5]) + ggthemeNoFont + scale_x_discrete(xlab, xlabel) +
scale_y_continuous(name = ylab, limits = c(0, max(prop$prop)), expand = c(0.005,0.005)) +
# the year in big
geom_text(data = data.frame(x = levels(prop$Age)[nlevels(prop$Age)-5], y = max(prop$prop)-0.67, label = as.character(a)),
aes(label = label, x = x, y = y), family = family, alpha = 0.6, size = 60, color = swi_9palette[9], hjust = 1) +
# the title
geom_text(data = data.frame(x = levels(prop$Age)[1],
y = max(prop$prop)-0.05, label = title), aes(label = label, x = x, y = y), family = family, alpha = 1, size = 9, hjust = 0, vjust = 0,
fontface ="bold") +
# the description
geom_text(data = data.frame(x = levels(prop$Age)[1],
y = max(prop$prop)-0.157, label = descr), aes(label = label, x = x, y = y), family = family, alpha = 0.8, size = 6, hjust = 0,vjust =0) +
# theme
theme(axis.text = element_text(size = rel(1), lineheight = 0), plot.margin = unit(c(0.8,1,1.1,0), "lines"),
axis.title = element_text(size = rel(1.5)), text = element_text(family = family))
# Logo is placed below the x-axis (negative y), outside the panel area.
ghista <- ghist + annotation_custom(grob = g, xmin = nlevels(prop$Age)-nlevels(prop$Age)/8, xmax = nlevels(prop$Age),
ymin = -0.15, ymax = -0.22)
gt <- ggplot_gtable(ggplot_build(ghista))
# Disable panel clipping so the logo drawn outside the panel stays visible.
gt$layout$clip[gt$layout$name=="panel"] <- "off"
grid.newpage()
grid.draw(gt)
}
# Pick an arbitrary sample year and run a few one-off rendering checks.
a <- unique(prop$Annee)[10]
# All seven arguments of plotayear2() passed positionally from translation column 1.
plotayear2(data, a, text[1,1], text[2,1], text[3,1], text[4,1], text[5,1])
# some tests for exotic characters (non-Latin scripts, system default font)
plotayear2(data, a, iconv(text[1,4]), text[2,4], text[3,4], family = "")
i <- 5
plotayear2(data, a, iconv(text[1,i]), text[2,i], text[3,i], family = "")
plotayear2(data, a, iconv(text[1,i]), text[2,i], text[3,i] )
# take only every 4th year to keep the animation short
data.sub <- data[data$Annee %% 4 == 0,]
# Render one animated GIF per language column of the translation table,
# written to the working directory as populationAge_<lang>.gif.
for(i in 1:ncol(text)) {
fontToBeUsed <- text[5,i]
cat("\n", colnames(text)[i], "\t with font:", fontToBeUsed, "\n")
saveGIF({
# One frame per selected year, plus a final 2012 frame.
for(a in c(unique(data.sub$Annee), 2012)) {
plotayear2(data.sub, a, title = text[1,i], descr = text[2,i], xlab = text[3,i], ylab = text[4,i], family = fontToBeUsed)
}
}, movie.name = paste("populationAge_", colnames(text)[i], ".gif", sep =""), interval = 0.35, nmax = 50, ani.width = 640*1.1,
ani.height = 640*1.1, loop = TRUE, outdir = getwd())
}
# i <- 1
# plotayear2(data, a, iconv(text[1,i]), text[2,i], text[3,i], family = "Hei")
#
#
# i <- 4
# plotayear2(data, a, iconv(text[1,i]), text[2,i], text[3,i], family = "Hei")
# i <- 5
# plotayear2(data, a, iconv(text[1,i]), text[2,i], text[3,i], family = "Osaka")
#
#
# #Sys.setlocale(locale =c ('zh_CN.UTF-8'))
# x <- read.csv(textConnection("
# 名称,类,学生
# 木材,2,2
# 表,3,4
# 笔,4,2
# 垃圾桶,5,6
# 杯,6,3"), header = TRUE)
# rownames(x) <- x[,1]
# x <- x[,-1]
# barplot(t(x), horiz = TRUE, beside = TRUE, legend.text = TRUE)
# barplot(t(x), horiz = TRUE, beside = TRUE, legend.text = TRUE, family = "Hei")
|
"permutation.test.fun" <-
function (x, y = NULL, fun = function(x, y) sum(x * y), alternative = "greater",
trials = 1000)
{
if (length(y)) {
n <- length(y)
if (length(x) != n)
stop("x and y have different lengths")
if (!is.numeric(y))
stop("y must be numeric")
}
else {
if (ncol(x) != 2)
stop("x does not have 2 columns and y is missing")
x <- as.matrix(x)
if (!is.numeric(x))
stop("x must be numeric")
y <- x[, 2]
x <- x[, 1]
n <- length(y)
}
if (length(alternative) != 1 || !is.character(alternative))
stop("alternative must be a single character string")
altnum <- pmatch(alternative, c("greater", "less"), nomatch = NA)
if (is.na(altnum))
stop("alternative must partially match 'greater' or 'less'")
alternative <- c("greater", "less")[altnum]
if(!exists(".Random.seed")) runif(1)
ranseed <- .Random.seed
orig.score <- fun(x, y)
if (length(orig.score) != 1)
stop("fun must return a single number")
perm.scores <- numeric(trials)
for (i in 1:trials) {
perm.scores[i] <- fun(x, sample(y))
}
if (alternative == "greater") {
extreme <- sum(perm.scores >= orig.score)
}
else {
extreme <- sum(perm.scores <= orig.score)
}
ans <- list(original.score = orig.score, perm.scores = perm.scores,
stats = c(nobs = n, trials = trials, extreme = extreme),
alternative = alternative, random.seed = ranseed, call = match.call())
class(ans) <- "permtstBurSt"
ans
}
|
/R/permutation.test.fun.R
|
no_license
|
cran/BurStMisc
|
R
| false
| false
| 1,675
|
r
|
"permutation.test.fun" <-
function (x, y = NULL, fun = function(x, y) sum(x * y), alternative = "greater",
trials = 1000)
{
if (length(y)) {
n <- length(y)
if (length(x) != n)
stop("x and y have different lengths")
if (!is.numeric(y))
stop("y must be numeric")
}
else {
if (ncol(x) != 2)
stop("x does not have 2 columns and y is missing")
x <- as.matrix(x)
if (!is.numeric(x))
stop("x must be numeric")
y <- x[, 2]
x <- x[, 1]
n <- length(y)
}
if (length(alternative) != 1 || !is.character(alternative))
stop("alternative must be a single character string")
altnum <- pmatch(alternative, c("greater", "less"), nomatch = NA)
if (is.na(altnum))
stop("alternative must partially match 'greater' or 'less'")
alternative <- c("greater", "less")[altnum]
if(!exists(".Random.seed")) runif(1)
ranseed <- .Random.seed
orig.score <- fun(x, y)
if (length(orig.score) != 1)
stop("fun must return a single number")
perm.scores <- numeric(trials)
for (i in 1:trials) {
perm.scores[i] <- fun(x, sample(y))
}
if (alternative == "greater") {
extreme <- sum(perm.scores >= orig.score)
}
else {
extreme <- sum(perm.scores <= orig.score)
}
ans <- list(original.score = orig.score, perm.scores = perm.scores,
stats = c(nobs = n, trials = trials, extreme = extreme),
alternative = alternative, random.seed = ranseed, call = match.call())
class(ans) <- "permtstBurSt"
ans
}
|
# Open the global connection `con` to the local "ed" PostgreSQL database
# (listening on 127.0.0.1:9876).
library(RPostgreSQL)
con <- dbConnect(
  dbDriver("PostgreSQL"),
  user     = "postgres",
  password = "postgres",
  dbname   = "ed",
  host     = "127.0.0.1",
  port     = 9876
)
|
/R/0_init.R
|
no_license
|
tloszabno/ed-projekt
|
R
| false
| false
| 146
|
r
|
# Open the global connection `con` to the local "ed" PostgreSQL database
# (listening on 127.0.0.1:9876).
library(RPostgreSQL)
con <- dbConnect(
  dbDriver("PostgreSQL"),
  user     = "postgres",
  password = "postgres",
  dbname   = "ed",
  host     = "127.0.0.1",
  port     = 9876
)
|
# k-NN classification of the Wisconsin breast cancer data set:
# load, drop the id column, z-score standardize, split train/test,
# fit k-NN with k = 21, and cross-tabulate predictions vs. truth.

# library() rather than require(): fail immediately if a package is missing
# instead of erroring later with "could not find function".
library(class)    # knn()
library(gmodels)  # CrossTable()

wbcd <- read.csv('wisc_bc_data.csv', stringsAsFactors=FALSE)
# Drop the first column (the record id) -- it carries no predictive signal.
wbcd <- wbcd[-1]
wbcd$diagnosis <- factor(wbcd$diagnosis, levels=c('B','M'),
                         labels=c('Benign','Malignant'))
# z-score standardize every feature column (diagnosis excluded).
wbcd_z <- as.data.frame(scale(wbcd[-1]))
# First 469 rows train, remaining 100 rows test (data is assumed pre-shuffled).
wbcd_train <- wbcd_z[1:469,]
wbcd_test <- wbcd_z[470:569,]
wbcd_train_labels <- wbcd[1:469,1]
wbcd_test_labels <- wbcd[470:569,1]
wbcd_test_pred <- knn(train=wbcd_train, test=wbcd_test,
                      cl=wbcd_train_labels, k=21)
CrossTable(x=wbcd_test_labels, y=wbcd_test_pred, prop.chisq=FALSE)
|
/MLwR/wbcd.R
|
permissive
|
glu99331/R-lang
|
R
| false
| false
| 574
|
r
|
# k-NN classification of the Wisconsin breast cancer data set:
# load, drop the id column, z-score standardize, split train/test,
# fit k-NN with k = 21, and cross-tabulate predictions vs. truth.

# library() rather than require(): fail immediately if a package is missing
# instead of erroring later with "could not find function".
library(class)    # knn()
library(gmodels)  # CrossTable()

wbcd <- read.csv('wisc_bc_data.csv', stringsAsFactors=FALSE)
# Drop the first column (the record id) -- it carries no predictive signal.
wbcd <- wbcd[-1]
wbcd$diagnosis <- factor(wbcd$diagnosis, levels=c('B','M'),
                         labels=c('Benign','Malignant'))
# z-score standardize every feature column (diagnosis excluded).
wbcd_z <- as.data.frame(scale(wbcd[-1]))
# First 469 rows train, remaining 100 rows test (data is assumed pre-shuffled).
wbcd_train <- wbcd_z[1:469,]
wbcd_test <- wbcd_z[470:569,]
wbcd_train_labels <- wbcd[1:469,1]
wbcd_test_labels <- wbcd[470:569,1]
wbcd_test_pred <- knn(train=wbcd_train, test=wbcd_test,
                      cl=wbcd_train_labels, k=21)
CrossTable(x=wbcd_test_labels, y=wbcd_test_pred, prop.chisq=FALSE)
|
#' Calculate a factor analysis for a Rasch Model
#'
#' @param df a data frame of individual survey data, where each row is an individual
#' @param vars_metric a character vector of items to use in the Rasch Analysis
#' @param print_results a logical vector indicating whether to print the results of the model to the \code{model_name} directory
#' @param path_output a string with the path to the output folder. Default is NULL.
#'
#' @return a list with results from the factor analysis for a Rasch Model:
#' \item{cor_poly}{the matrix of polychoric correlations}
#' \item{eigenvalues}{the eigenvalues}
#' \item{parallel_analysis}{permutation parallel analysis distribution}
#' \item{results_scree}{results of a scree analysis}
#' \item{n_group_factors}{number of factors from the parallel analysis in the scree analysis}
#' \item{fa_onefactor}{results from factor analysis with one factor}
#' \item{fa_resid}{local dependency based on polychoric correlations of the items}
#'
#' @details Unidimensionality of the data is one of the core assumptions of the Rasch Model. This function performs the factor analysis to assess the unidimensionality of the data.
#'
#' @family rasch functions
#'
#' @export
#'
#' @import dplyr
rasch_factor <- function(df, vars_metric, print_results = TRUE, path_output = NULL) {
  #----------------------------
  # convert to tibble (avoids base data.frame subsetting surprises)
  if (!tibble::is_tibble(df)) df <- df %>% as_tibble()
  # Metric items as ordered factors, as required for polychoric correlations.
  # all_of() makes the external-vector selection explicit (unambiguous, and
  # errors on missing columns); passing `ordered` directly replaces the
  # deprecated funs() wrapper.
  df_ordered <- df %>%
    select(all_of(vars_metric)) %>%
    mutate_all(ordered) %>%
    as.data.frame()
  # Numeric copy of the same items for the eigenvalue computations.
  df_numeric <- df %>%
    select(all_of(vars_metric)) %>%
    as.data.frame()
  # calculate polychoric correlations
  cor_poly <- polycor::hetcor(df_ordered, use ="pairwise.complete.obs", ML = FALSE, std.err=FALSE)
  #----------------------------
  # permuted parallel analysis to test the unidimensionality
  eigenvalues <- nFactors::eigenComputes(x=df_numeric, use="pairwise.complete.obs")
  # Permutation parallel analysis distribution
  parallel_analysis <- nFactors::eigenBootParallel(x=df_numeric, quantile=0.95, nboot=30, option="permutation",
                                                   cor=TRUE, model="components", use="pairwise.complete.obs")$quantile
  # number of components to retain
  results_scree <- nFactors::nScree(x = eigenvalues, aparallel = parallel_analysis)
  n_group_factors <- results_scree$Components$nparallel
  #----------------------------
  # bi-factor analysis to test the unidimensionality; the bi-factor model can
  # legitimately fail when fewer than 2 factors are present, hence try()
  fa_bifactor <- try(psych::fa(cor_poly$correlations,n_group_factors+1,rotate="bifactor"), silent=TRUE) # bi-factor model
  fa_onefactor <- psych::fa(cor_poly$correlations,1,rotate="bifactor") # single factor model
  if (any(class(fa_bifactor)=="try-error")) message("Bi-factor model unable to be computed--it is likely there are not 2 factors")
  #------------------------------------
  # local dependency based on polychoric correlations of the items
  fa_resid <- psych::factor.residuals(cor_poly$correlations,fa_onefactor)
  # PRINT RESULTS
  if (print_results) {
    if (is.null(path_output)) stop("You need to give a path for the output")
    # polychoric correlations
    save(cor_poly, file = paste0(path_output,"/cor_poly.RData"))
    utils::write.csv(round(cor_poly$correlations, 3), file=paste0(path_output,"/cor_poly.csv"))
    # scree plot
    grDevices::pdf(file=paste0(path_output,"/parallel_analysis_scree.pdf"), width=7, height=7)
    nFactors::plotnScree(results_scree)
    grDevices::dev.off()
    # bi-factor plot: loadings of each factor across items, plus the
    # single-factor loadings as a dotted reference line
    if (!any(class(fa_bifactor)=="try-error")) { #if fa_bifactor was able to be computed
      #create vector of possible colors
      col_factors <- RColorBrewer::brewer.pal(ncol(fa_bifactor$loadings),"Spectral")
      #create pdf of bifactor analysis
      grDevices::pdf(file=paste0(path_output,"/bifactor_analysis.pdf"), width=7, height=7)
      # par(col="black", mar=c(13, 4, 4, 2) + 0.1)
      graphics::plot(fa_bifactor$loadings[,1], type="l", ylim=c(-0.5,1), lwd=1.5, col="black", xaxt="n", xlab="", ylab="Loadings" )
      graphics::axis(side=1, at = 1:length(vars_metric), labels = vars_metric, las=2)
      for(i in 2:ncol(fa_bifactor$loadings)){
        graphics::lines(fa_bifactor$loadings[,i], col=col_factors[i], lwd=1.3)
      }
      graphics::lines(fa_onefactor$loadings[,1], col="black", lty="dotted", lwd=1.5)
      grDevices::dev.off()
      utils::write.csv(cbind(fa_bifactor$loadings, fa_onefactor$loadings), file=paste0(path_output,"/bifactor_loadings.csv"))
    } else { #if fa_bifactor was not able to be computed
      utils::write.csv(unclass(fa_onefactor$loadings), file=paste0(path_output,"/bifactor_loadings.csv"))
    }
    # local dependency based on polychoric correlations of the items
    utils::write.csv(round(fa_resid,3), file=paste0(path_output,"/fa_resid.csv"))
  }
  factor_result <- list(cor_poly = cor_poly,
                        eigenvalues = eigenvalues,
                        parallel_analysis = parallel_analysis,
                        results_scree = results_scree,
                        n_group_factors = n_group_factors,
                        fa_onefactor = fa_onefactor,
                        fa_resid = fa_resid)
  return(factor_result)
}
|
/R/rasch_factor.R
|
no_license
|
CarolinaFellinghauer/whomds
|
R
| false
| false
| 5,401
|
r
|
#' Calculate a factor analysis for a Rasch Model
#'
#' @param df a data frame of individual survey data, where each row is an individual
#' @param vars_metric a character vector of items to use in the Rasch Analysis
#' @param print_results a logical vector indicating whether to print the results of the model to the \code{model_name} directory
#' @param path_output a string with the path to the output folder. Default is NULL.
#'
#' @return a list with results from the factor analysis for a Rasch Model:
#' \item{cor_poly}{the matrix of polychoric correlations}
#' \item{eigenvalues}{the eigenvalues}
#' \item{parallel_analysis}{permutation parallel analysis distribution}
#' \item{results_scree}{results of a scree analysis}
#' \item{n_group_factors}{number of factors from the parallel analysis in the scree analysis}
#' \item{fa_onefactor}{results from factor analysis with one factor}
#' \item{fa_resid}{local dependency based on polychoric correlations of the items}
#'
#' @details Unidimensionality of the data is one of the core assumptions of the Rasch Model. This function performs the factor analysis to assess the unidimensionality of the data.
#'
#' @family rasch functions
#'
#' @export
#'
#' @import dplyr
rasch_factor <- function(df, vars_metric, print_results = TRUE, path_output = NULL) {
  #----------------------------
  # convert to tibble (avoids base data.frame subsetting surprises)
  if (!tibble::is_tibble(df)) df <- df %>% as_tibble()
  # Metric items as ordered factors, as required for polychoric correlations.
  # all_of() makes the external-vector selection explicit (unambiguous, and
  # errors on missing columns); passing `ordered` directly replaces the
  # deprecated funs() wrapper.
  df_ordered <- df %>%
    select(all_of(vars_metric)) %>%
    mutate_all(ordered) %>%
    as.data.frame()
  # Numeric copy of the same items for the eigenvalue computations.
  df_numeric <- df %>%
    select(all_of(vars_metric)) %>%
    as.data.frame()
  # calculate polychoric correlations
  cor_poly <- polycor::hetcor(df_ordered, use ="pairwise.complete.obs", ML = FALSE, std.err=FALSE)
  #----------------------------
  # permuted parallel analysis to test the unidimensionality
  eigenvalues <- nFactors::eigenComputes(x=df_numeric, use="pairwise.complete.obs")
  # Permutation parallel analysis distribution
  parallel_analysis <- nFactors::eigenBootParallel(x=df_numeric, quantile=0.95, nboot=30, option="permutation",
                                                   cor=TRUE, model="components", use="pairwise.complete.obs")$quantile
  # number of components to retain
  results_scree <- nFactors::nScree(x = eigenvalues, aparallel = parallel_analysis)
  n_group_factors <- results_scree$Components$nparallel
  #----------------------------
  # bi-factor analysis to test the unidimensionality; the bi-factor model can
  # legitimately fail when fewer than 2 factors are present, hence try()
  fa_bifactor <- try(psych::fa(cor_poly$correlations,n_group_factors+1,rotate="bifactor"), silent=TRUE) # bi-factor model
  fa_onefactor <- psych::fa(cor_poly$correlations,1,rotate="bifactor") # single factor model
  if (any(class(fa_bifactor)=="try-error")) message("Bi-factor model unable to be computed--it is likely there are not 2 factors")
  #------------------------------------
  # local dependency based on polychoric correlations of the items
  fa_resid <- psych::factor.residuals(cor_poly$correlations,fa_onefactor)
  # PRINT RESULTS
  if (print_results) {
    if (is.null(path_output)) stop("You need to give a path for the output")
    # polychoric correlations
    save(cor_poly, file = paste0(path_output,"/cor_poly.RData"))
    utils::write.csv(round(cor_poly$correlations, 3), file=paste0(path_output,"/cor_poly.csv"))
    # scree plot
    grDevices::pdf(file=paste0(path_output,"/parallel_analysis_scree.pdf"), width=7, height=7)
    nFactors::plotnScree(results_scree)
    grDevices::dev.off()
    # bi-factor plot: loadings of each factor across items, plus the
    # single-factor loadings as a dotted reference line
    if (!any(class(fa_bifactor)=="try-error")) { #if fa_bifactor was able to be computed
      #create vector of possible colors
      col_factors <- RColorBrewer::brewer.pal(ncol(fa_bifactor$loadings),"Spectral")
      #create pdf of bifactor analysis
      grDevices::pdf(file=paste0(path_output,"/bifactor_analysis.pdf"), width=7, height=7)
      # par(col="black", mar=c(13, 4, 4, 2) + 0.1)
      graphics::plot(fa_bifactor$loadings[,1], type="l", ylim=c(-0.5,1), lwd=1.5, col="black", xaxt="n", xlab="", ylab="Loadings" )
      graphics::axis(side=1, at = 1:length(vars_metric), labels = vars_metric, las=2)
      for(i in 2:ncol(fa_bifactor$loadings)){
        graphics::lines(fa_bifactor$loadings[,i], col=col_factors[i], lwd=1.3)
      }
      graphics::lines(fa_onefactor$loadings[,1], col="black", lty="dotted", lwd=1.5)
      grDevices::dev.off()
      utils::write.csv(cbind(fa_bifactor$loadings, fa_onefactor$loadings), file=paste0(path_output,"/bifactor_loadings.csv"))
    } else { #if fa_bifactor was not able to be computed
      utils::write.csv(unclass(fa_onefactor$loadings), file=paste0(path_output,"/bifactor_loadings.csv"))
    }
    # local dependency based on polychoric correlations of the items
    utils::write.csv(round(fa_resid,3), file=paste0(path_output,"/fa_resid.csv"))
  }
  factor_result <- list(cor_poly = cor_poly,
                        eigenvalues = eigenvalues,
                        parallel_analysis = parallel_analysis,
                        results_scree = results_scree,
                        n_group_factors = n_group_factors,
                        fa_onefactor = fa_onefactor,
                        fa_resid = fa_resid)
  return(factor_result)
}
|
#' summary.mm
#'
#' "summary.mm" is used to display the results of the mediation analyses done with "multimediate".
#'
#' For each causal quantity the table reports the point estimate, the bounds
#' of its confidence interval and its p-value, all rounded to 4 digits.
#'
#' @param object element of the class "mm".
#' @param opt a character string indicating the details of the analysis: "navg" for the average causal effects for t=0,1 and "avg" for the average causal effects.
#' @param logit a character string indicating, when the outcome is binary, the scale of the average causal effects: "effects" for average causal effects, "OR" for average causal effects on the OR scale, "logOR" for average causal effects on the logOR scale and "all" for all scales.
#' @param ... additional arguments affecting the summary produced
#'
#' @return table (data.frame) summarizing the causal analysis
#' @export
#'
summary.mm = function(object,opt="navg",logit="all",...){
  # Row labels for the per-arm ("navg") and treatment-averaged ("avg") tables.
  nom.navg=c("ACME.joint.treat","PM(treat)","ACME.joint.control","PM(control)",paste(c("ACME.treat.","PM(treat).","ACME.control.","PM(control)."),rep(object$mediator,each=4),sep=""),"ADE.treat","ADE.control","Total Effect")
  nom.avg=c("ACME.joint","PM.joint",paste(c("ACME.","PM."),rep(object$mediator,each=2),sep=""),"ADE","Total Effect")
  if (length(object$mediator)>1){
    # Several mediators: joint effects plus one block per mediator
    # (the triout.* helpers flatten the per-mediator components).
    navg=data.frame("." =nom.navg,
                    Estimation=round(c(object$d1,object$n1,object$d0,object$n0,triout.NM(object$d1.NM,object$n1.NM,object$d0.NM,object$n0.NM),object$z1,object$z0,object$tau.coef),4),
                    IC.inf =round(c(object$d1.ci[1],object$n1.ci[1],object$d0.ci[1],object$n0.ci[1],triout.ci.NM(object$d1.ci.NM,object$n1.ci.NM,object$d0.ci.NM,object$n0.ci.NM)[,1],object$z1.ci[1],object$z0.ci[1],object$tau.ci[1]),4),
                    IC.sup =round(c(object$d1.ci[2],object$n1.ci[2],object$d0.ci[2],object$n0.ci[2],triout.ci.NM(object$d1.ci.NM,object$n1.ci.NM,object$d0.ci.NM,object$n0.ci.NM)[,2],object$z1.ci[2],object$z0.ci[2],object$tau.ci[2]),4),
                    P.val =round(c(object$d1.p,object$n1.p,object$d0.p,object$n0.p,triout.NM(object$d1.p.NM,object$n1.p.NM,object$d0.p.NM,object$n0.p.NM),object$z1.p,object$z0.p,object$tau.p),4)
    )
    avg=data.frame("." =nom.avg,
                   Estimation=round(c(object$d.avg,object$n.avg,triout.avg.NM(object$d.avg.NM,object$n.avg.NM),object$z.avg,object$tau.coef),4),
                   IC.inf =round(c(object$d.avg.ci[1],object$n.avg.ci[1],triout.ci.avg.NM(object$d.avg.ci.NM,object$n.avg.ci.NM)[,1],object$z.avg.ci[1],object$tau.ci[1]),4),
                   IC.sup =round(c(object$d.avg.ci[2],object$n.avg.ci[2],triout.ci.avg.NM(object$d.avg.ci.NM,object$n.avg.ci.NM)[,2],object$z.avg.ci[2],object$tau.ci[2]),4),
                   P.val =round(c(object$d.avg.p,object$n.avg.p,triout.avg.NM(object$d.avg.p.NM,object$n.avg.p.NM),object$z.avg.p,object$tau.p),4)
    )
    # Binary outcome fitted with a logit link: also build OR- and
    # logOR-scale versions of both tables.
    if (!is.null(object$model.y$family)){
      if (object$model.y$family$link=="logit"){
        warning("The proportions mediated on the OR scale can be considered if the outcome is rare, otherwise the proportions mediated on effects scale and/or logOR scale have to be considered.")
        ORnavg=data.frame("." =paste("OR",nom.navg),
                          Estimation=round(c(object$ORd1,object$ORn1,object$ORd0,object$ORn0,triout.NM(object$ORd1.NM,object$ORn1.NM,object$ORd0.NM,object$ORn0.NM),object$ORz1,object$ORz0,object$ORtau.coef),4),
                          IC.inf =round(c(object$ORd1.ci[1],object$ORn1.ci[1],object$ORd0.ci[1],object$ORn0.ci[1],triout.ci.NM(object$ORd1.ci.NM,object$ORn1.ci.NM,object$ORd0.ci.NM,object$ORn0.ci.NM)[,1],object$ORz1.ci[1],object$ORz0.ci[1],object$ORtau.ci[1]),4),
                          IC.sup =round(c(object$ORd1.ci[2],object$ORn1.ci[2],object$ORd0.ci[2],object$ORn0.ci[2],triout.ci.NM(object$ORd1.ci.NM,object$ORn1.ci.NM,object$ORd0.ci.NM,object$ORn0.ci.NM)[,2],object$ORz1.ci[2],object$ORz0.ci[2],object$ORtau.ci[2]),4),
                          P.val =round(c(object$ORd1.p,object$ORn1.p,object$ORd0.p,object$ORn0.p,triout.NM(object$ORd1.p.NM,object$ORn1.p.NM,object$ORd0.p.NM,object$ORn0.p.NM),object$ORz1.p,object$ORz0.p,object$ORtau.p),4)
        )
        ORavg=data.frame("." =paste("OR",nom.avg),
                         Estimation=round(c(object$ORd.avg ,object$ORn.avg ,triout.avg.NM( object$ORd.avg.NM ,object$ORn.avg.NM) ,object$ORz.avg ,object$ORtau.coef) ,4),
                         IC.inf =round(c(object$ORd.avg.ci[1],object$ORn.avg.ci[1],triout.ci.avg.NM(object$ORd.avg.ci.NM ,object$ORn.avg.ci.NM)[,1],object$ORz.avg.ci[1],object$ORtau.ci[1]),4),
                         IC.sup =round(c(object$ORd.avg.ci[2],object$ORn.avg.ci[2],triout.ci.avg.NM(object$ORd.avg.ci.NM ,object$ORn.avg.ci.NM)[,2],object$ORz.avg.ci[2],object$ORtau.ci[2]),4),
                         P.val =round(c(object$ORd.avg.p ,object$ORn.avg.p ,triout.avg.NM( object$ORd.avg.p.NM ,object$ORn.avg.p.NM) ,object$ORz.avg.p ,object$ORtau.p) ,4)
        )
        logORnavg=data.frame("." =paste("logOR",nom.navg),
                             Estimation=round(c(object$logORd1, object$logORn1, object$logORd0, object$logORn0, triout.NM( object$logORd1.NM, object$logORn1.NM, object$logORd0.NM, object$logORn0.NM), object$logORz1, object$logORz0, object$logORtau.coef), 4),
                             IC.inf =round(c(object$logORd1.ci[1], object$logORn1.ci[1],object$logORd0.ci[1],object$logORn0.ci[1],triout.ci.NM(object$logORd1.ci.NM,object$logORn1.ci.NM,object$logORd0.ci.NM,object$logORn0.ci.NM)[,1],object$logORz1.ci[1],object$logORz0.ci[1],object$logORtau.ci[1]),4),
                             IC.sup =round(c(object$logORd1.ci[2], object$logORn1.ci[2],object$logORd0.ci[2],object$logORn0.ci[2],triout.ci.NM(object$logORd1.ci.NM,object$logORn1.ci.NM,object$logORd0.ci.NM,object$logORn0.ci.NM)[,2],object$logORz1.ci[2],object$logORz0.ci[2],object$logORtau.ci[2]),4),
                             P.val =round(c(object$logORd1.p, object$logORn1.p, object$logORd0.p, object$logORn0.p, triout.NM( object$logORd1.p.NM, object$logORn1.p.NM, object$logORd0.p.NM, object$logORn0.p.NM), object$logORz1.p, object$logORz0.p, object$logORtau.p), 4)
        )
        logORavg=data.frame("." =paste("logOR",nom.avg),
                            Estimation=round(c(object$logORd.avg,object$logORn.avg,triout.avg.NM(object$logORd.avg.NM,object$logORn.avg.NM),object$logORz.avg,object$logORtau.coef),4),
                            IC.inf =round(c(object$logORd.avg.ci[1],object$logORn.avg.ci[1],triout.ci.avg.NM(object$logORd.avg.ci.NM,object$logORn.avg.ci.NM)[,1],object$logORz.avg.ci[1],object$logORtau.ci[1]),4),
                            IC.sup =round(c(object$logORd.avg.ci[2],object$logORn.avg.ci[2],triout.ci.avg.NM(object$logORd.avg.ci.NM,object$logORn.avg.ci.NM)[,2],object$logORz.avg.ci[2],object$logORtau.ci[2]),4),
                            P.val =round(c(object$logORd.avg.p,object$logORn.avg.p,triout.avg.NM(object$logORd.avg.p.NM,object$logORn.avg.p.NM),object$logORz.avg.p,object$logORtau.p),4)
        )
      }}
    # Proportions mediated are only interpretable when every effect shares
    # the sign of the total effect; warn otherwise.
    pmtest= c(object$d1,object$d0,object$d1.NM,object$d0.NM,object$z1,object$z0,object$tau.coef)
    if(sum(pmtest>0)!=length(pmtest)){
      warning("Proportion mediated warning : This quantity makes sense only when the sign the causal mediated effects (i.e., the numerator) are the same as the sign of the average total effect (i.e., the denominator) and when the total effect is bigger than the mediated effect. Confidence intervals may be meaningless when the mediated and/or direct effects have a different sign.")
    }
  }
  else {
    # Single mediator: no joint/per-mediator split, shorter tables.
    navg=data.frame("." =c("ACME.treat","PM(treat)","ACME.control","PM(control)","ADE.treat","ADE.control","Total Effect"),
                    Estimation=round(c(object$d1,object$n1,object$d0,object$n0,object$z1,object$z0,object$tau.coef),4),
                    IC.inf =round(c(object$d1.ci[1],object$n1.ci[1],object$d0.ci[1],object$n0.ci[1],object$z1.ci[1],object$z0.ci[1],object$tau.ci[1]),4),
                    IC.sup =round(c(object$d1.ci[2],object$n1.ci[2],object$d0.ci[2],object$n0.ci[2],object$z1.ci[2],object$z0.ci[2],object$tau.ci[2]),4),
                    P.val =round(c(object$d1.p,object$n1.p,object$d0.p,object$n0.p,object$z1.p,object$z0.p,object$tau.p),4)
    )
    avg=data.frame("." =c("ACME","PM","ADE","Total Effect"),
                   Estimation=round(c(object$d.avg,object$n.avg,object$z.avg,object$tau.coef),4),
                   IC.inf =round(c(object$d.avg.ci[1],object$n.avg.ci[1],object$z.avg.ci[1],object$tau.ci[1]),4),
                   IC.sup =round(c(object$d.avg.ci[2],object$n.avg.ci[2],object$z.avg.ci[2],object$tau.ci[2]),4),
                   P.val =round(c(object$d.avg.p,object$n.avg.p,object$z.avg.p,object$tau.p),4)
    )
    # Binary outcome fitted with a logit link: OR- and logOR-scale tables.
    if (!is.null(object$model.y$family)){
      if (object$model.y$family$link=="logit"){
        warning("The proportion mediated on the OR scale can be considered if the outcome is rare, otherwise the proportion mediated on effect scale and/or logOR scale have to be considered.")
        ORnavg=data.frame("." =c("OR.ACME.treat","OR.PM(treat)","OR.ACME.control","OR.PM(control)","OR.ADE.treat","OR.ADE.control","OR.Total Effect"),
                          Estimation=round(c(object$ORd1,object$ORn1,object$ORd0,object$ORn0,object$ORz1,object$ORz0,object$ORtau.coef),4),
                          IC.inf =round(c(object$ORd1.ci[1],object$ORn1.ci[1],object$ORd0.ci[1],object$ORn0.ci[1],object$ORz1.ci[1],object$ORz0.ci[1],object$ORtau.ci[1]),4),
                          IC.sup =round(c(object$ORd1.ci[2],object$ORn1.ci[2],object$ORd0.ci[2],object$ORn0.ci[2],object$ORz1.ci[2],object$ORz0.ci[2],object$ORtau.ci[2]),4),
                          P.val =round(c(object$ORd1.p,object$ORn1.p,object$ORd0.p,object$ORn0.p,object$ORz1.p,object$ORz0.p,object$ORtau.p),4)
        )
        ORavg=data.frame("." =c("OR.ACME","OR.PM","OR.ADE","OR.Total Effect"),
                         Estimation=round(c(object$ORd.avg,object$ORn.avg,object$ORz.avg,object$ORtau.coef),4),
                         IC.inf =round(c(object$ORd.avg.ci[1],object$ORn.avg.ci[1],object$ORz.avg.ci[1],object$ORtau.ci[1]),4),
                         IC.sup =round(c(object$ORd.avg.ci[2],object$ORn.avg.ci[2],object$ORz.avg.ci[2],object$ORtau.ci[2]),4),
                         P.val =round(c(object$ORd.avg.p,object$ORn.avg.p,object$ORz.avg.p,object$ORtau.p),4)
        )
        logORnavg=data.frame("." =c("logOR.ACME.treat","logOR.PM(treat)","logOR.ACME.control","logOR.PM(control)","logOR.ADE.treat","logOR.ADE.control","logOR.Total Effect"),
                             Estimation=round(c(object$logORd1,object$logORn1,object$logORd0,object$logORn0,object$logORz1,object$logORz0,object$logORtau.coef),4),
                             IC.inf =round(c(object$logORd1.ci[1],object$logORn1.ci[1],object$logORd0.ci[1],object$logORn0.ci[1],object$logORz1.ci[1],object$logORz0.ci[1],object$logORtau.ci[1]),4),
                             IC.sup =round(c(object$logORd1.ci[2],object$logORn1.ci[2],object$logORd0.ci[2],object$logORn0.ci[2],object$logORz1.ci[2],object$logORz0.ci[2],object$logORtau.ci[2]),4),
                             P.val =round(c(object$logORd1.p,object$logORn1.p,object$logORd0.p,object$logORn0.p,object$logORz1.p,object$logORz0.p,object$logORtau.p),4)
        )
        logORavg=data.frame("." =c("logOR.ACME","logOR.PM","logOR.ADE","logOR.Total Effect"),
                            Estimation=round(c(object$logORd.avg,object$logORn.avg,object$logORz.avg,object$logORtau.coef),4),
                            IC.inf =round(c(object$logORd.avg.ci[1],object$logORn.avg.ci[1],object$logORz.avg.ci[1],object$logORtau.ci[1]),4),
                            IC.sup =round(c(object$logORd.avg.ci[2],object$logORn.avg.ci[2],object$logORz.avg.ci[2],object$logORtau.ci[2]),4),
                            P.val =round(c(object$logORd.avg.p,object$logORn.avg.p,object$logORz.avg.p,object$logORtau.p),4)
        )
      }}
    # Sign check for the proportions mediated (see note above).
    pmtest= c(object$d1,object$d0,object$z1,object$z0,object$tau.coef)
    if(sum(pmtest>0)!=length(pmtest)){
      warning("Proportion mediated warning : This quantity makes sense only when the sign the causal mediated effects (i.e., the numerator) are the same as the sign of the average total effect (i.e., the denominator) and when the total effect is bigger than the mediated effect. Confidence intervals may be meaningless when the mediated and/or direct effects have a different sign.")
    }
  }
  # Select the requested table(s). Conditions are scalar, so use the
  # short-circuit && rather than the elementwise &.
  if (opt=="avg"){
    if (is.null(object$model.y$family) || object$model.y$family$link=="probit")
    {res=avg}
    else{
      if (object$model.y$family$link=="logit" && logit=="all"){
        res=cbind(avg,ORavg,logORavg)
      }
      else if (object$model.y$family$link=="logit" && logit=="OR"){
        res=ORavg
      }
      else if (object$model.y$family$link=="logit" && logit=="logOR"){
        res=logORavg
      }
      else{
        res=avg
      }
    }
  }
  else{
    if (is.null(object$model.y$family) || object$model.y$family$link=="probit")
    {res=navg}
    else {
      if (object$model.y$family$link=="logit" && logit=="all"){
        res=cbind(navg,ORnavg,logORnavg)
      }
      else if (object$model.y$family$link=="logit" && logit=="OR"){
        res=ORnavg
      }
      else if (object$model.y$family$link=="logit" && logit=="logOR"){
        res=logORnavg
      }
      else{
        res=navg
      }
    }
  }
  return(res)
}
|
/R/summary.mm.R
|
no_license
|
AllanJe/multimediate
|
R
| false
| false
| 13,705
|
r
|
#' summary.mm
#'
#' "summary.mm" is used to display the results of the mediation analyses done with "multimediate".
#'
#' For each causal quantity the table reports the point estimate, the bounds
#' of its confidence interval and its p-value, all rounded to 4 digits.
#'
#' @param object element of the class "mm".
#' @param opt a character string indicating the details of the analysis: "navg" for the average causal effects for t=0,1 and "avg" for the average causal effects.
#' @param logit a character string indicating, when the outcome is binary, the scale of the average causal effects: "effects" for average causal effects, "OR" for average causal effects on the OR scale, "logOR" for average causal effects on the logOR scale and "all" for all scales.
#' @param ... additional arguments affecting the summary produced
#'
#' @return table summarizing the causal analysis
#' @export
#'
summary.mm = function(object,opt="navg",logit="all",...){
  # Row labels for the per-arm ("navg") and treatment-averaged ("avg") tables.
  nom.navg=c("ACME.joint.treat","PM(treat)","ACME.joint.control","PM(control)",paste(c("ACME.treat.","PM(treat).","ACME.control.","PM(control)."),rep(object$mediator,each=4),sep=""),"ADE.treat","ADE.control","Total Effect")
  nom.avg=c("ACME.joint","PM.joint",paste(c("ACME.","PM."),rep(object$mediator,each=2),sep=""),"ADE","Total Effect")
  # Several mediators: joint effects plus one block per mediator
  # (the triout.* helpers flatten the per-mediator components).
  if (length(object$mediator)>1){
    navg=data.frame("." =nom.navg,
                    Estimation=round(c(object$d1,object$n1,object$d0,object$n0,triout.NM(object$d1.NM,object$n1.NM,object$d0.NM,object$n0.NM),object$z1,object$z0,object$tau.coef),4),
                    IC.inf =round(c(object$d1.ci[1],object$n1.ci[1],object$d0.ci[1],object$n0.ci[1],triout.ci.NM(object$d1.ci.NM,object$n1.ci.NM,object$d0.ci.NM,object$n0.ci.NM)[,1],object$z1.ci[1],object$z0.ci[1],object$tau.ci[1]),4),
                    IC.sup =round(c(object$d1.ci[2],object$n1.ci[2],object$d0.ci[2],object$n0.ci[2],triout.ci.NM(object$d1.ci.NM,object$n1.ci.NM,object$d0.ci.NM,object$n0.ci.NM)[,2],object$z1.ci[2],object$z0.ci[2],object$tau.ci[2]),4),
                    P.val =round(c(object$d1.p,object$n1.p,object$d0.p,object$n0.p,triout.NM(object$d1.p.NM,object$n1.p.NM,object$d0.p.NM,object$n0.p.NM),object$z1.p,object$z0.p,object$tau.p),4)
    )
    avg=data.frame("." =nom.avg,
                   Estimation=round(c(object$d.avg,object$n.avg,triout.avg.NM(object$d.avg.NM,object$n.avg.NM),object$z.avg,object$tau.coef),4),
                   IC.inf =round(c(object$d.avg.ci[1],object$n.avg.ci[1],triout.ci.avg.NM(object$d.avg.ci.NM,object$n.avg.ci.NM)[,1],object$z.avg.ci[1],object$tau.ci[1]),4),
                   IC.sup =round(c(object$d.avg.ci[2],object$n.avg.ci[2],triout.ci.avg.NM(object$d.avg.ci.NM,object$n.avg.ci.NM)[,2],object$z.avg.ci[2],object$tau.ci[2]),4),
                   P.val =round(c(object$d.avg.p,object$n.avg.p,triout.avg.NM(object$d.avg.p.NM,object$n.avg.p.NM),object$z.avg.p,object$tau.p),4)
    )
    # Binary outcome fitted with a logit link: also build OR- and
    # logOR-scale versions of both tables.
    if (!is.null(object$model.y$family)){
      if (object$model.y$family$link=="logit"){
        warning("The proportions mediated on the OR scale can be considered if the outcome is rare, otherwise the proportions mediated on effects scale and/or logOR scale have to be considered.")
        ORnavg=data.frame("." =paste("OR",nom.navg),
                          Estimation=round(c(object$ORd1,object$ORn1,object$ORd0,object$ORn0,triout.NM(object$ORd1.NM,object$ORn1.NM,object$ORd0.NM,object$ORn0.NM),object$ORz1,object$ORz0,object$ORtau.coef),4),
                          IC.inf =round(c(object$ORd1.ci[1],object$ORn1.ci[1],object$ORd0.ci[1],object$ORn0.ci[1],triout.ci.NM(object$ORd1.ci.NM,object$ORn1.ci.NM,object$ORd0.ci.NM,object$ORn0.ci.NM)[,1],object$ORz1.ci[1],object$ORz0.ci[1],object$ORtau.ci[1]),4),
                          IC.sup =round(c(object$ORd1.ci[2],object$ORn1.ci[2],object$ORd0.ci[2],object$ORn0.ci[2],triout.ci.NM(object$ORd1.ci.NM,object$ORn1.ci.NM,object$ORd0.ci.NM,object$ORn0.ci.NM)[,2],object$ORz1.ci[2],object$ORz0.ci[2],object$ORtau.ci[2]),4),
                          P.val =round(c(object$ORd1.p,object$ORn1.p,object$ORd0.p,object$ORn0.p,triout.NM(object$ORd1.p.NM,object$ORn1.p.NM,object$ORd0.p.NM,object$ORn0.p.NM),object$ORz1.p,object$ORz0.p,object$ORtau.p),4)
        )
        ORavg=data.frame("." =paste("OR",nom.avg),
                         Estimation=round(c(object$ORd.avg ,object$ORn.avg ,triout.avg.NM( object$ORd.avg.NM ,object$ORn.avg.NM) ,object$ORz.avg ,object$ORtau.coef) ,4),
                         IC.inf =round(c(object$ORd.avg.ci[1],object$ORn.avg.ci[1],triout.ci.avg.NM(object$ORd.avg.ci.NM ,object$ORn.avg.ci.NM)[,1],object$ORz.avg.ci[1],object$ORtau.ci[1]),4),
                         IC.sup =round(c(object$ORd.avg.ci[2],object$ORn.avg.ci[2],triout.ci.avg.NM(object$ORd.avg.ci.NM ,object$ORn.avg.ci.NM)[,2],object$ORz.avg.ci[2],object$ORtau.ci[2]),4),
                         P.val =round(c(object$ORd.avg.p ,object$ORn.avg.p ,triout.avg.NM( object$ORd.avg.p.NM ,object$ORn.avg.p.NM) ,object$ORz.avg.p ,object$ORtau.p) ,4)
        )
        logORnavg=data.frame("." =paste("logOR",nom.navg),
                             Estimation=round(c(object$logORd1, object$logORn1, object$logORd0, object$logORn0, triout.NM( object$logORd1.NM, object$logORn1.NM, object$logORd0.NM, object$logORn0.NM), object$logORz1, object$logORz0, object$logORtau.coef), 4),
                             IC.inf =round(c(object$logORd1.ci[1], object$logORn1.ci[1],object$logORd0.ci[1],object$logORn0.ci[1],triout.ci.NM(object$logORd1.ci.NM,object$logORn1.ci.NM,object$logORd0.ci.NM,object$logORn0.ci.NM)[,1],object$logORz1.ci[1],object$logORz0.ci[1],object$logORtau.ci[1]),4),
                             IC.sup =round(c(object$logORd1.ci[2], object$logORn1.ci[2],object$logORd0.ci[2],object$logORn0.ci[2],triout.ci.NM(object$logORd1.ci.NM,object$logORn1.ci.NM,object$logORd0.ci.NM,object$logORn0.ci.NM)[,2],object$logORz1.ci[2],object$logORz0.ci[2],object$logORtau.ci[2]),4),
                             P.val =round(c(object$logORd1.p, object$logORn1.p, object$logORd0.p, object$logORn0.p, triout.NM( object$logORd1.p.NM, object$logORn1.p.NM, object$logORd0.p.NM, object$logORn0.p.NM), object$logORz1.p, object$logORz0.p, object$logORtau.p), 4)
        )
        logORavg=data.frame("." =paste("logOR",nom.avg),
                            Estimation=round(c(object$logORd.avg,object$logORn.avg,triout.avg.NM(object$logORd.avg.NM,object$logORn.avg.NM),object$logORz.avg,object$logORtau.coef),4),
                            IC.inf =round(c(object$logORd.avg.ci[1],object$logORn.avg.ci[1],triout.ci.avg.NM(object$logORd.avg.ci.NM,object$logORn.avg.ci.NM)[,1],object$logORz.avg.ci[1],object$logORtau.ci[1]),4),
                            IC.sup =round(c(object$logORd.avg.ci[2],object$logORn.avg.ci[2],triout.ci.avg.NM(object$logORd.avg.ci.NM,object$logORn.avg.ci.NM)[,2],object$logORz.avg.ci[2],object$logORtau.ci[2]),4),
                            P.val =round(c(object$logORd.avg.p,object$logORn.avg.p,triout.avg.NM(object$logORd.avg.p.NM,object$logORn.avg.p.NM),object$logORz.avg.p,object$logORtau.p),4)
        )
      }}
    # Proportions mediated are only interpretable when every effect shares
    # the sign of the total effect; warn otherwise.
    pmtest= c(object$d1,object$d0,object$d1.NM,object$d0.NM,object$z1,object$z0,object$tau.coef)
    if(sum(pmtest>0)!=length(pmtest)){
      warning("Proportion mediated warning : This quantity makes sense only when the sign the causal mediated effects (i.e., the numerator) are the same as the sign of the average total effect (i.e., the denominator) and when the total effect is bigger than the mediated effect. Confidence intervals may be meaningless when the mediated and/or direct effects have a different sign.")
    }
  }
  # Single mediator: no joint/per-mediator split, shorter tables.
  else {
    navg=data.frame("." =c("ACME.treat","PM(treat)","ACME.control","PM(control)","ADE.treat","ADE.control","Total Effect"),
                    Estimation=round(c(object$d1,object$n1,object$d0,object$n0,object$z1,object$z0,object$tau.coef),4),
                    IC.inf =round(c(object$d1.ci[1],object$n1.ci[1],object$d0.ci[1],object$n0.ci[1],object$z1.ci[1],object$z0.ci[1],object$tau.ci[1]),4),
                    IC.sup =round(c(object$d1.ci[2],object$n1.ci[2],object$d0.ci[2],object$n0.ci[2],object$z1.ci[2],object$z0.ci[2],object$tau.ci[2]),4),
                    P.val =round(c(object$d1.p,object$n1.p,object$d0.p,object$n0.p,object$z1.p,object$z0.p,object$tau.p),4)
    )
    avg=data.frame("." =c("ACME","PM","ADE","Total Effect"),
                   Estimation=round(c(object$d.avg,object$n.avg,object$z.avg,object$tau.coef),4),
                   IC.inf =round(c(object$d.avg.ci[1],object$n.avg.ci[1],object$z.avg.ci[1],object$tau.ci[1]),4),
                   IC.sup =round(c(object$d.avg.ci[2],object$n.avg.ci[2],object$z.avg.ci[2],object$tau.ci[2]),4),
                   P.val =round(c(object$d.avg.p,object$n.avg.p,object$z.avg.p,object$tau.p),4)
    )
    # Binary outcome fitted with a logit link: OR- and logOR-scale tables.
    if (!is.null(object$model.y$family)){
      if (object$model.y$family$link=="logit"){
        warning("The proportion mediated on the OR scale can be considered if the outcome is rare, otherwise the proportion mediated on effect scale and/or logOR scale have to be considered.")
        ORnavg=data.frame("." =c("OR.ACME.treat","OR.PM(treat)","OR.ACME.control","OR.PM(control)","OR.ADE.treat","OR.ADE.control","OR.Total Effect"),
                          Estimation=round(c(object$ORd1,object$ORn1,object$ORd0,object$ORn0,object$ORz1,object$ORz0,object$ORtau.coef),4),
                          IC.inf =round(c(object$ORd1.ci[1],object$ORn1.ci[1],object$ORd0.ci[1],object$ORn0.ci[1],object$ORz1.ci[1],object$ORz0.ci[1],object$ORtau.ci[1]),4),
                          IC.sup =round(c(object$ORd1.ci[2],object$ORn1.ci[2],object$ORd0.ci[2],object$ORn0.ci[2],object$ORz1.ci[2],object$ORz0.ci[2],object$ORtau.ci[2]),4),
                          P.val =round(c(object$ORd1.p,object$ORn1.p,object$ORd0.p,object$ORn0.p,object$ORz1.p,object$ORz0.p,object$ORtau.p),4)
        )
        ORavg=data.frame("." =c("OR.ACME","OR.PM","OR.ADE","OR.Total Effect"),
                         Estimation=round(c(object$ORd.avg,object$ORn.avg,object$ORz.avg,object$ORtau.coef),4),
                         IC.inf =round(c(object$ORd.avg.ci[1],object$ORn.avg.ci[1],object$ORz.avg.ci[1],object$ORtau.ci[1]),4),
                         IC.sup =round(c(object$ORd.avg.ci[2],object$ORn.avg.ci[2],object$ORz.avg.ci[2],object$ORtau.ci[2]),4),
                         P.val =round(c(object$ORd.avg.p,object$ORn.avg.p,object$ORz.avg.p,object$ORtau.p),4)
        )
        logORnavg=data.frame("." =c("logOR.ACME.treat","logOR.PM(treat)","logOR.ACME.control","logOR.PM(control)","logOR.ADE.treat","logOR.ADE.control","logOR.Total Effect"),
                             Estimation=round(c(object$logORd1,object$logORn1,object$logORd0,object$logORn0,object$logORz1,object$logORz0,object$logORtau.coef),4),
                             IC.inf =round(c(object$logORd1.ci[1],object$logORn1.ci[1],object$logORd0.ci[1],object$logORn0.ci[1],object$logORz1.ci[1],object$logORz0.ci[1],object$logORtau.ci[1]),4),
                             IC.sup =round(c(object$logORd1.ci[2],object$logORn1.ci[2],object$logORd0.ci[2],object$logORn0.ci[2],object$logORz1.ci[2],object$logORz0.ci[2],object$logORtau.ci[2]),4),
                             P.val =round(c(object$logORd1.p,object$logORn1.p,object$logORd0.p,object$logORn0.p,object$logORz1.p,object$logORz0.p,object$logORtau.p),4)
        )
        logORavg=data.frame("." =c("logOR.ACME","logOR.PM","logOR.ADE","logOR.Total Effect"),
                            Estimation=round(c(object$logORd.avg,object$logORn.avg,object$logORz.avg,object$logORtau.coef),4),
                            IC.inf =round(c(object$logORd.avg.ci[1],object$logORn.avg.ci[1],object$logORz.avg.ci[1],object$logORtau.ci[1]),4),
                            IC.sup =round(c(object$logORd.avg.ci[2],object$logORn.avg.ci[2],object$logORz.avg.ci[2],object$logORtau.ci[2]),4),
                            P.val =round(c(object$logORd.avg.p,object$logORn.avg.p,object$logORz.avg.p,object$logORtau.p),4)
        )
      }}
    # Sign check for the proportions mediated (see note above).
    pmtest= c(object$d1,object$d0,object$z1,object$z0,object$tau.coef)
    if(sum(pmtest>0)!=length(pmtest)){
      warning("Proportion mediated warning : This quantity makes sense only when the sign the causal mediated effects (i.e., the numerator) are the same as the sign of the average total effect (i.e., the denominator) and when the total effect is bigger than the mediated effect. Confidence intervals may be meaningless when the mediated and/or direct effects have a different sign.")
    }
  }
  # Select the requested table(s): probit/linear outcomes only have the
  # effects scale; logit outcomes may add OR/logOR tables per `logit`.
  if (opt=="avg"){
    if (is.null(object$model.y$family) || object$model.y$family$link=="probit")
    {res=avg}
    else{
      if (object$model.y$family$link=="logit" & logit=="all"){
        res=cbind(avg,ORavg,logORavg)
      }
      else if (object$model.y$family$link=="logit" & logit=="OR"){
        res=ORavg
      }
      else if (object$model.y$family$link=="logit" & logit=="logOR"){
        res=logORavg
      }
      else{
        res=avg
      }
    }
  }
  else{
    if (is.null(object$model.y$family) || object$model.y$family$link=="probit")
    {res=navg}
    else {
      if (object$model.y$family$link=="logit" & logit=="all"){
        res=cbind(navg,ORnavg,logORnavg)
      }
      else if (object$model.y$family$link=="logit" & logit=="OR"){
        res=ORnavg
      }
      else if (object$model.y$family$link=="logit" & logit=="logOR"){
        res=logORnavg
      }
      else{
        res=navg
      }
    }
  }
  return(res)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_objects.R
\name{TargetHttpsProxyList}
\alias{TargetHttpsProxyList}
\title{TargetHttpsProxyList Object}
\usage{
TargetHttpsProxyList(id = NULL, items = NULL, nextPageToken = NULL,
selfLink = NULL)
}
\arguments{
\item{id}{[Output Only] The unique identifier for the resource}
\item{items}{A list of TargetHttpsProxy resources}
\item{nextPageToken}{[Output Only] This token allows you to get the next page of results for list requests}
\item{selfLink}{[Output Only] Server-defined URL for this resource}
}
\value{
TargetHttpsProxyList object
}
\description{
TargetHttpsProxyList Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Contains a list of TargetHttpsProxy resources.
}
|
/googlecomputealpha.auto/man/TargetHttpsProxyList.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 804
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_objects.R
\name{TargetHttpsProxyList}
\alias{TargetHttpsProxyList}
\title{TargetHttpsProxyList Object}
\usage{
TargetHttpsProxyList(id = NULL, items = NULL, nextPageToken = NULL,
selfLink = NULL)
}
\arguments{
\item{id}{[Output Only] The unique identifier for the resource}
\item{items}{A list of TargetHttpsProxy resources}
\item{nextPageToken}{[Output Only] This token allows you to get the next page of results for list requests}
\item{selfLink}{[Output Only] Server-defined URL for this resource}
}
\value{
TargetHttpsProxyList object
}
\description{
TargetHttpsProxyList Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Contains a list of TargetHttpsProxy resources.
}
|
## Description: Basic NCA AUC function
## Key words: NCA, AUC, function
## Author: Tarj Sahota, Peter Lawrence
## Run interactively: FALSE

#' Compute a non-compartmental AUC by trapezoidal integration.
#'
#' @param time numeric vector of sampling times.
#' @param conc numeric vector of concentrations (same length as `time`).
#' @param loq lower limit of quantification; observations with `conc < loq`
#'   (and any NA time/concentration) are dropped before integration.
#' @param method "linear" (linear trapezoid throughout), "linuplogdown"
#'   (linear on rising/flat segments, log-trapezoid on falling segments) or
#'   "linuplogdowntmax" (linear up to tmax, log-trapezoid after tmax).
#' @return scalar AUC.
AUC <- function(time, conc, loq=0,method=c("linuplogdown","linuplogdowntmax","linear"))
{
  method <- match.arg(method)
  ## Log-trapezoidal rule for declining segments. The log formula is 0/0
  ## (NaN) when two successive concentrations are equal, so fall back to
  ## the linear trapezoid there (the standard lin-up/log-down convention).
  trap.log.rule <- function(x,y){
    y1 <- y[-length(y)]
    y2 <- y[-1]
    seg <- diff(x)*(y1-y2)/(log(y1)-log(y2))
    flat <- y1 == y2
    seg[flat] <- diff(x)[flat]*y1[flat]  # (y1+y2)/2 reduces to y1 when equal
    sum(seg)
  }
  ## Drop missing observations and values below the limit of quantification.
  ok <- !is.na(time) & !is.na(conc) & conc >= loq
  time <- time[ok]
  conc <- conc[ok]
  if(method=="linear") return(Hmisc::trap.rule(time, conc))
  if(method=="linuplogdowntmax"){
    ## Linear trapezoid up to tmax, log-trapezoid after tmax.
    tmax <- time[which.max(conc)]
    return(Hmisc::trap.rule(time[time<=tmax],conc[time<=tmax]) +
             trap.log.rule(time[time>=tmax],conc[time>=tmax]))
  }
  if(method=="linuplogdown"){
    ## Segment-wise: linear on rising/flat segments, log on falling ones.
    up.diffs <- which(diff(conc)>=0)
    down.diffs <- which(diff(conc)<0)
    auc <- 0
    if(length(up.diffs)>0){
      linup <- sum(vapply(up.diffs,function(i){
        Hmisc::trap.rule(time[i:(i+1)],conc[i:(i+1)])
      }, numeric(1)))
      auc <- auc + linup
    }
    if(length(down.diffs)>0){
      logdown <- sum(vapply(down.diffs,function(i){
        trap.log.rule(time[i:(i+1)],conc[i:(i+1)])
      }, numeric(1)))
      auc <- auc + logdown
    }
    return(auc)
  }
}
|
/Scripts/AUC.R
|
no_license
|
tsahota/PMXcodelibrary
|
R
| false
| false
| 1,259
|
r
|
## Description: Basic NCA AUC function
## Key words: NCA, AUC, function
## Author: Tarj Sahota, Peter Lawrence
## Run interactively: FALSE

#' Non-compartmental AUC via trapezoidal integration.
#'
#' Observations with missing time/concentration or concentration below `loq`
#' are excluded, then the profile is integrated with the chosen rule:
#' "linear" throughout, "linuplogdown" (linear on rising/flat segments, log
#' on strictly falling ones) or "linuplogdowntmax" (linear up to tmax, log
#' after tmax).
AUC <- function(time, conc, loq = 0, method = c("linuplogdown", "linuplogdowntmax", "linear"))
{
  method <- match.arg(method)

  ## Log-trapezoidal integral of (x, y) over all its segments.
  log_trap <- function(x, y) {
    n <- length(y)
    lhs <- y[-n]
    rhs <- y[-1]
    sum(diff(x) * (lhs - rhs) / (log(lhs) - log(rhs)))
  }

  keep <- !is.na(time) & !is.na(conc) & conc >= loq
  time <- time[keep]
  conc <- conc[keep]

  if (method == "linear") {
    return(Hmisc::trap.rule(time, conc))
  }

  if (method == "linuplogdowntmax") {
    tmax <- time[which.max(conc)]
    before <- time <= tmax
    after <- time >= tmax
    return(Hmisc::trap.rule(time[before], conc[before]) +
             log_trap(time[after], conc[after]))
  }

  ## linuplogdown: integrate segment by segment.
  rising <- which(diff(conc) >= 0)
  falling <- which(diff(conc) < 0)
  total <- 0
  for (i in rising) {
    total <- total + Hmisc::trap.rule(time[i:(i + 1)], conc[i:(i + 1)])
  }
  for (i in falling) {
    total <- total + log_trap(time[i:(i + 1)], conc[i:(i + 1)])
  }
  total
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate.R
\name{isAccessibleFunction}
\alias{isAccessibleFunction}
\title{Do we have access to the function in test generation phase, to recompute the return value from the arguments?}
\usage{
isAccessibleFunction(function.name)
}
\arguments{
\item{function.name}{string name of binding}
}
\value{
boolean value signaling whether evaluating the supplied function name in the global scope yields the function or not.
}
\description{
Do we have access to the function in test generation phase, to recompute the return value from the arguments?
}
\examples{
isAccessibleFunction("myFn1") == FALSE
isAccessibleFunction("ggplot2::ggplot") == TRUE
}
|
/man/isAccessibleFunction.Rd
|
no_license
|
mvacha/genthat
|
R
| false
| true
| 724
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate.R
\name{isAccessibleFunction}
\alias{isAccessibleFunction}
\title{Do we have access to the function in test generation phase, to recompute the return value from the arguments?}
\usage{
isAccessibleFunction(function.name)
}
\arguments{
\item{function.name}{string name of binding}
}
\value{
boolean value signaling whether evaluating the supplied function name in the global scope yields the function or not.
}
\description{
Do we have access to the function in test generation phase, to recompute the return value from the arguments?
}
\examples{
isAccessibleFunction("myFn1") == FALSE
isAccessibleFunction("ggplot2::ggplot") == TRUE
}
|
# Load the known Alzheimer's risk-gene symbols (one per line) and de-duplicate.
alzGenes <- scan('~/Desktop/alzgenes.txt',what='character')
alzGenes <- unique(alzGenes)
library(metaNet)
# Rank-based enrichment of the AD gene set along the gene list ordered in
# newDf (newDf is assumed to be created by an earlier script -- TODO confirm).
enr1<-metaNet::enrichmentPath(rankedList = newDf$hgncSymbol,targetList = alzGenes)
library(ggplot2)
# pval is overwritten in place with its -log10 transform.
enr1$pval <- -log10(as.numeric(enr1$pval))
enr1$enr <- as.numeric(enr1$enr)
enr1$rank <- 1:nrow(enr1)
require(dplyr)
# One row per distinct driver-score threshold; columns filled by the loop below.
enrichmentDf <- data.frame(class=newDf$driverScore %>% unique)
enrichmentDf$enrichment <- rep(0,enrichmentDf$class %>% length)
enrichmentDf$log10pvalue <- rep(0,enrichmentDf$class %>% length)
enrichmentDf$ngenes <- rep(0,enrichmentDf$class %>% length)
# For each threshold, test enrichment of AD genes among genes with
# driverScore >= threshold, against the full gene list as the universe.
for (i in 1:nrow(enrichmentDf)){
enrRes <- metaNet::enrichment(alzGenes,newDf$hgncSymbol[newDf$driverScore>=enrichmentDf$class[i]],newDf$hgncSymbol)
print(enrRes)
enrichmentDf$ngenes[i] <- as.integer(sum(newDf$driverScore>=enrichmentDf$class[i]))
enrichmentDf$enrichment[i] <- as.numeric(enrRes$enr)
enrichmentDf$log10pvalue[i] <- -log10(as.numeric(enrRes$pval))
}
# Back-transform to raw p-values for plotting on a log-scaled axis.
enrichmentDf$pvalue <- 10^(-enrichmentDf$log10pvalue)
str(enr1)
# Bar plot: p-value vs number of genes retained at each threshold.
a <- ggplot(enrichmentDf,aes(ngenes,pvalue))
a + geom_bar(stat='identity',aes(fill=class)) + scale_x_log10() + xlab('Number of Genes') + ylab('P-value') +scale_y_log10() + theme_classic() + scale_fill_continuous(name='Driver Score\nGreater Than') + ggtitle('Enrichment of Known Alzheimer Genetic Loci')+ xlim(800,max(enrichmentDf$ngenes))
# Bar plot: enrichment ratio vs number of genes retained.
a <- ggplot(enrichmentDf,aes(ngenes,enrichment))
a + geom_bar(stat='identity',aes(fill=class)) + scale_x_log10() + xlab('Number of Genes') + ylab('Enrichment') + theme_classic() + scale_fill_continuous(name='Driver Score\nGreater Than') + ggtitle('Enrichment of Known Alzheimer Genetic Loci') + xlim(800,max(enrichmentDf$ngenes))
# NOTE(review): enr1$pval was already -log10-transformed above, so this
# applies -log10 a second time -- confirm whether that is intended.
plot(-log10(as.numeric(enr1$pval[1:5000])))
|
/quickAnalyses.R
|
no_license
|
blogsdon/ROSMAP
|
R
| false
| false
| 1,739
|
r
|
## Enrichment of known Alzheimer genetic loci among ranked candidate driver genes.
## NOTE(review): depends on `newDf` (with columns hgncSymbol and driverScore)
## being defined in the calling environment -- confirm before sourcing.
alzGenes <- scan('~/Desktop/alzgenes.txt', what = 'character')
alzGenes <- unique(alzGenes)
library(metaNet)
## Pathway-style enrichment over the full ranked gene list
enr1 <- metaNet::enrichmentPath(rankedList = newDf$hgncSymbol, targetList = alzGenes)
library(ggplot2)
enr1$pval <- -log10(as.numeric(enr1$pval))
enr1$enr <- as.numeric(enr1$enr)
enr1$rank <- seq_len(nrow(enr1))  # was 1:nrow(enr1); seq_len() is safe for 0 rows
library(dplyr)  # was require(); library() fails loudly if dplyr is missing
## One row per distinct driver-score threshold
enrichmentDf <- data.frame(class = unique(newDf$driverScore))
enrichmentDf$enrichment <- rep(0, nrow(enrichmentDf))
enrichmentDf$log10pvalue <- rep(0, nrow(enrichmentDf))
enrichmentDf$ngenes <- rep(0, nrow(enrichmentDf))
## Enrichment of the Alzheimer gene set among genes at/above each score threshold
for (i in seq_len(nrow(enrichmentDf))) {
  enrRes <- metaNet::enrichment(alzGenes,
                                newDf$hgncSymbol[newDf$driverScore >= enrichmentDf$class[i]],
                                newDf$hgncSymbol)
  print(enrRes)
  enrichmentDf$ngenes[i] <- as.integer(sum(newDf$driverScore >= enrichmentDf$class[i]))
  enrichmentDf$enrichment[i] <- as.numeric(enrRes$enr)
  enrichmentDf$log10pvalue[i] <- -log10(as.numeric(enrRes$pval))
}
enrichmentDf$pvalue <- 10^(-enrichmentDf$log10pvalue)
str(enr1)
## P-value vs number of genes at each threshold (both axes log-scaled)
a <- ggplot(enrichmentDf, aes(ngenes, pvalue))
a + geom_bar(stat = 'identity', aes(fill = class)) + scale_x_log10() +
  xlab('Number of Genes') + ylab('P-value') + scale_y_log10() + theme_classic() +
  scale_fill_continuous(name = 'Driver Score\nGreater Than') +
  ggtitle('Enrichment of Known Alzheimer Genetic Loci') +
  xlim(800, max(enrichmentDf$ngenes))
## Enrichment ratio vs number of genes at each threshold
a <- ggplot(enrichmentDf, aes(ngenes, enrichment))
a + geom_bar(stat = 'identity', aes(fill = class)) + scale_x_log10() +
  xlab('Number of Genes') + ylab('Enrichment') + theme_classic() +
  scale_fill_continuous(name = 'Driver Score\nGreater Than') +
  ggtitle('Enrichment of Known Alzheimer Genetic Loci') +
  xlim(800, max(enrichmentDf$ngenes))
## NOTE(review): enr1$pval was already -log10-transformed above, so this applies
## -log10 twice -- confirm whether the raw p-values were intended here.
plot(-log10(as.numeric(enr1$pval[1:5000])))
|
## Law of large numbers demo: running proportions of heads ("cara") and tails
## ("coroa") converging to 0.5 as tosses accumulate.
## FIX: the script uses %>%/mutate/select (dplyr), spread (tidyr) and ggplot
## without loading them; load explicitly so the script is self-contained.
library(dplyr)
library(tidyr)
library(ggplot2)
## 64 recorded tosses (TRUE = heads). T/F replaced by TRUE/FALSE: T and F are
## ordinary variables that can be reassigned, unlike the reserved literals.
lancamentos <- c(
  FALSE,FALSE,TRUE,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,
  TRUE,TRUE,FALSE,FALSE,FALSE,FALSE,TRUE,TRUE,FALSE,FALSE,
  TRUE,FALSE,FALSE,FALSE,TRUE,FALSE,FALSE,FALSE,TRUE,TRUE,
  TRUE,TRUE,FALSE,FALSE,FALSE,FALSE,TRUE,TRUE,TRUE,FALSE,
  TRUE,FALSE,FALSE,TRUE,FALSE,FALSE,TRUE,FALSE,TRUE,FALSE,
  FALSE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,FALSE,
  TRUE,TRUE,FALSE,TRUE
)
## Pad with 100k simulated fair tosses
complementos <- sample(c(TRUE, FALSE), replace = TRUE, size = 100000)
lancamentos <- c(lancamentos, complementos)
n_lancamento <- seq_along(lancamentos)  # was 1:length(...); safe for empty input
## Cumulative counts and running proportions per side
tab <- data.frame(n_lancamento, lancamentos) %>%
  mutate(lado = ifelse(lancamentos, "cara", "coroa"),
         vlr = 1) %>%
  select(-lancamentos) %>%
  spread(lado, vlr, fill = 0) %>%  # NOTE: spread() is superseded by pivot_wider()
  mutate(cara_ac = cumsum(cara),
         coroa_ac = cumsum(coroa),
         prob_cara = cara_ac / n_lancamento,
         prob_coroa = coroa_ac / n_lancamento)
## Running probabilities with the theoretical 0.5 reference line
ggplot(tab, aes(x = n_lancamento)) +
  geom_line(aes(y = prob_cara), color = "blue", size = 1) +
  geom_line(aes(y = prob_coroa), color = "green", size = 1) +
  geom_hline(yintercept = 0.5, color = "goldenrod", size = 2, linetype = "dashed")
|
/vi_experimento_moeda.R
|
no_license
|
tiagombp/learning-rstats
|
R
| false
| false
| 903
|
r
|
## Law of large numbers demo: running proportions of heads ("cara") and tails
## ("coroa") converging to 0.5 as tosses accumulate.
## FIX: the script uses %>%/mutate/select (dplyr), spread (tidyr) and ggplot
## without loading them; load explicitly so the script is self-contained.
library(dplyr)
library(tidyr)
library(ggplot2)
## 64 recorded tosses (TRUE = heads). T/F replaced by TRUE/FALSE: T and F are
## ordinary variables that can be reassigned, unlike the reserved literals.
lancamentos <- c(
  FALSE,FALSE,TRUE,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,
  TRUE,TRUE,FALSE,FALSE,FALSE,FALSE,TRUE,TRUE,FALSE,FALSE,
  TRUE,FALSE,FALSE,FALSE,TRUE,FALSE,FALSE,FALSE,TRUE,TRUE,
  TRUE,TRUE,FALSE,FALSE,FALSE,FALSE,TRUE,TRUE,TRUE,FALSE,
  TRUE,FALSE,FALSE,TRUE,FALSE,FALSE,TRUE,FALSE,TRUE,FALSE,
  FALSE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,FALSE,
  TRUE,TRUE,FALSE,TRUE
)
## Pad with 100k simulated fair tosses
complementos <- sample(c(TRUE, FALSE), replace = TRUE, size = 100000)
lancamentos <- c(lancamentos, complementos)
n_lancamento <- seq_along(lancamentos)  # was 1:length(...); safe for empty input
## Cumulative counts and running proportions per side
tab <- data.frame(n_lancamento, lancamentos) %>%
  mutate(lado = ifelse(lancamentos, "cara", "coroa"),
         vlr = 1) %>%
  select(-lancamentos) %>%
  spread(lado, vlr, fill = 0) %>%  # NOTE: spread() is superseded by pivot_wider()
  mutate(cara_ac = cumsum(cara),
         coroa_ac = cumsum(coroa),
         prob_cara = cara_ac / n_lancamento,
         prob_coroa = coroa_ac / n_lancamento)
## Running probabilities with the theoretical 0.5 reference line
ggplot(tab, aes(x = n_lancamento)) +
  geom_line(aes(y = prob_cara), color = "blue", size = 1) +
  geom_line(aes(y = prob_coroa), color = "green", size = 1) +
  geom_hline(yintercept = 0.5, color = "goldenrod", size = 2, linetype = "dashed")
|
##########################################################
# Create edx set, validation set (final hold-out test set)
# Code provided by course, I take no credit for this section.
##########################################################
# Note: this process could take a couple of minutes
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(scales)) install.packages("scales", repos = "http://cran.us.r-project.org")
library(tidyverse)
library(caret)
library(data.table)
library(scales)
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
# BUG FIX: movies.dat must be extracted from the downloaded zip (mirroring the
# ratings.dat line above); readLines("movies.dat") only worked if the file
# already happened to exist in the working directory.
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# if using R 4.0 or later:
# Added code to split year data out from movie title (e.g. "Toy Story (1995)" -> 1995)
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId),
title = as.character(title),
genres = as.character(genres),
year = as.numeric(substr(as.character(title),nchar(as.character(title))-4,nchar(as.character(title))-1)))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding") # if using R 3.5 or earlier, use `set.seed(1)`
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
##### end edx section #######
#Begin RMSE Model
# Root Mean Square Error Function
RMSE <- function(actual_rating, predicted_rating)
{
sqrt(mean((actual_rating - predicted_rating)^2))
}
adj_factors <- seq(0, 10, 0.5) # candidate lambda (regularization) values
mts <- mean(edx$rating) # mean rating of training set (loop-invariant, hoisted out of sapply)
rmses <- sapply(adj_factors, function(l){
me <- edx %>%
group_by(movieId) %>% #adjust by movie rating
summarize(me = sum(rating - mts)/(n()+l), .groups = 'drop') # penalize low number of ratings
# adjust mean by user ratings and movie ratings and penalize low number of ratings
am <- edx %>%
left_join(me, by="movieId") %>%
group_by(userId) %>% #adjust by user rating
summarize(am = sum(rating - me - mts)/(n()+l), .groups = 'drop')
# calculate predicted ratings based on movie and user effects
predicted_ratings <-
validation %>%
left_join(me, by = "movieId") %>%
left_join(am, by = "userId") %>%
mutate(pred = mts + me + am) %>% # combine all three adjustments to make a prediction
.$pred
return(RMSE(predicted_ratings, validation$rating))
})
plot(adj_factors, rmses)
adj_factor <- adj_factors[which.min(rmses)] # lambda minimizing hold-out RMSE
paste('RMSE:',min(rmses))
|
/2021_01_05_Mulhallp_MovieLens_R.R
|
no_license
|
pmulhall/movielens
|
R
| false
| false
| 3,593
|
r
|
##########################################################
# Create edx set, validation set (final hold-out test set)
# Code provided by course, I take no credit for this section.
##########################################################
# Note: this process could take a couple of minutes
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(scales)) install.packages("scales", repos = "http://cran.us.r-project.org")
library(tidyverse)
library(caret)
library(data.table)
library(scales)
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
# BUG FIX: movies.dat must be extracted from the downloaded zip (mirroring the
# ratings.dat line above); readLines("movies.dat") only worked if the file
# already happened to exist in the working directory.
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# if using R 4.0 or later:
# Added code to split year data out from movie title (e.g. "Toy Story (1995)" -> 1995)
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId),
title = as.character(title),
genres = as.character(genres),
year = as.numeric(substr(as.character(title),nchar(as.character(title))-4,nchar(as.character(title))-1)))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding") # if using R 3.5 or earlier, use `set.seed(1)`
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
##### end edx section #######
#Begin RMSE Model
# Root Mean Square Error Function
RMSE <- function(actual_rating, predicted_rating)
{
sqrt(mean((actual_rating - predicted_rating)^2))
}
adj_factors <- seq(0, 10, 0.5) # candidate lambda (regularization) values
mts <- mean(edx$rating) # mean rating of training set (loop-invariant, hoisted out of sapply)
rmses <- sapply(adj_factors, function(l){
me <- edx %>%
group_by(movieId) %>% #adjust by movie rating
summarize(me = sum(rating - mts)/(n()+l), .groups = 'drop') # penalize low number of ratings
# adjust mean by user ratings and movie ratings and penalize low number of ratings
am <- edx %>%
left_join(me, by="movieId") %>%
group_by(userId) %>% #adjust by user rating
summarize(am = sum(rating - me - mts)/(n()+l), .groups = 'drop')
# calculate predicted ratings based on movie and user effects
predicted_ratings <-
validation %>%
left_join(me, by = "movieId") %>%
left_join(am, by = "userId") %>%
mutate(pred = mts + me + am) %>% # combine all three adjustments to make a prediction
.$pred
return(RMSE(predicted_ratings, validation$rating))
})
plot(adj_factors, rmses)
adj_factor <- adj_factors[which.min(rmses)] # lambda minimizing hold-out RMSE
paste('RMSE:',min(rmses))
|
################################################################################
################################################################################
##
## Analysis script for ... Ogle, DH. 201X. An Algorithm for the von Bertalanffy
## Seasonal Cessation in Growth Function of Pauly et al. (1992). Fisheries
## Research XX:XXX-XXX.
##
## Need to be patient with bootstrapping functions. May also need to create a
## directory called "results" in your current working directory to hold the
## figures produced by pdf() (or not run the pdf() and dev.off() functions to
## simply produce the figures on the local device). Could use (in R) to
## create the directory (assumes that you have set your working directory to
## the same location as this script) ...
##
## dir.create("results")
##
## This code was tested on a Windows 7 machine using 32-bit R v3.3.1 and a
## Macintosh (El Capitan OS) machine using 64-bit R v3.3.1. The code runs
## without error on both machines, though there are several warnings related
## to model convergence during the bootstrapping procedures.
##
################################################################################
################################################################################
################################################################################
## SETUP
##
## Requires FSA (>=0.8.8) and FSAdata (>=0.3.3) from CRAN, installed with:
##
## install.packages("FSA")
## install.packages("FSAdata")
##
################################################################################
## Load required packages
library(FSAdata) # for Bonito and Mosquitofish data
library(FSA) # for Somers and Pauly function functions
library(nlstools) # for nls model bootstrapping
## Create a function for the Typical VBGF
vbTyp <- vbFuns("Typical")
## Create a function for the Somers VBGF
vbSO <- vbFuns("Somers")
## Create a function for the Pauly et al. VBGF
## (wrapped in parentheses so the generated function is also printed)
( vbPA <- vbFuns("Pauly") )
## Note that vbPA uses an internal function for computing t-prime. The next line
## displays this function (Step comments correspond to steps in the manuscript).
FSA:::iCalc_tpr
## Increase the maximum number of iterations for convergence in nls()
ctrl <- nls.control(maxiter=200)
## Set the random number seed so that the bootstraps stay reproducible
set.seed(730987)
################################################################################
## Example analysis with the Bonito data
################################################################################
data(Bonito)
## 1. Fit typical model
## Parameter bounds and starting values for the typical VBGF (Linf, K, t0);
## the bounds are reused by the Mosquitofish sections below.
Tlwrbnd <- c(Linf=0,K=0,t0=-Inf)
Tuprbnd <- c(Linf=Inf,K=Inf,t0=Inf)
TsvBon <- list(Linf=60,K=0.4,t0=-2.4)
TfitBon <- nls(fl~vbTyp(age,Linf,K,t0),data=Bonito,
start=TsvBon,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
## Bootstrap the fit for confidence intervals on the parameter estimates
TbootBon <- nlsBoot(TfitBon)
TcfBon <- cbind(Est=coef(TfitBon),confint(TbootBon))
## 2. Fit Somers function with C<=1 constraint (as in Stewart et al. (2013))
## Linf=71.9, K=0.27, t0=-1.92, C=1, and ts=0.09 ... all matched (w/in rounding)
Slwrbnd <- c(Linf=0,K=0,t0=-Inf,C=0,ts=0)
SuprbndB <- c(Linf=Inf,K=Inf,t0=Inf,C=1,ts=1)
SsvBon <- list(Linf=60,K=0.4,t0=-1.5,C=0.6,ts=0.2)
SfitBon <- nls(fl~vbSO(age,Linf,K,t0,C,ts),data=Bonito,
start=SsvBon,lower=Slwrbnd,upper=SuprbndB,
algorithm="port",control=ctrl)
SbootBon <- nlsBoot(SfitBon)
ScfBon <- cbind(Est=coef(SfitBon),confint(SbootBon))
## 3. Fit new Pauly et al. (1992) function
Plwrbnd <- c(Linf=0,Kpr=0,t0=-Inf,ts=0,NGT=0)
Puprbnd <- c(Linf=Inf,Kpr=Inf,t0=Inf,ts=1,NGT=1)
PsvBon <- list(Linf=60,Kpr=0.5,t0=1.3,ts=0.25,NGT=0.2)
PfitBon <- nls(fl~vbPA(age,Linf,Kpr,t0,ts,NGT),data=Bonito,
start=PsvBon,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
PbootBon <- nlsBoot(PfitBon)
PcfBon <- cbind(Est=coef(PfitBon),confint(PbootBon))
## 4. Summary results
## Append AIC and RSS (residual sum-of-squares) rows to each coefficient table
TcfBon <- rbind(TcfBon,c(AIC(TfitBon),NA,NA),c(deviance(TfitBon),NA,NA))
ScfBon <- rbind(ScfBon,c(AIC(SfitBon),NA,NA),c(deviance(SfitBon),NA,NA))
PcfBon <- rbind(PcfBon,c(AIC(PfitBon),NA,NA),c(deviance(PfitBon),NA,NA))
rownames(TcfBon)[4:5] <- rownames(ScfBon)[6:7] <- rownames(PcfBon)[6:7] <- c("AIC","RSS")
print(round(TcfBon,2),na.print="-")
print(round(ScfBon,2),na.print="-")
print(round(PcfBon,2),na.print="-")
################################################################################
## Example analysis with the Mosquitofish data -- Site 2
################################################################################
data(Mosquitofish)
mqf2 <- subset(Mosquitofish,sitenum==2)
## 1. Fit typical model (bounds Tlwrbnd/Tuprbnd defined in the Bonito section)
Tsvmqf2 <- list(Linf=40,K=0.6,t0=-1)
Tfitmqf2 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf2,
start=Tsvmqf2,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tbootmqf2 <- nlsBoot(Tfitmqf2)
Tcfmqf2 <- cbind(Est=coef(Tfitmqf2),confint(Tbootmqf2))
## 2. Fit Somers function with C unconstrained (as in Carmona-Catot et al. (2014))
## Linf=35.85, K=2.012, t0=-0.02, C=1.95, and ts=-0.118 ...
## they all match (within rounding) except ts but it is off by +1
SuprbndM <- c(Linf=Inf,K=Inf,t0=Inf,C=Inf,ts=1)
Ssvmqf2 <- list(Linf=40,K=0.5,t0=-1,C=1.5,ts=0.9)
Sfitmqf2 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf2,
start=Ssvmqf2,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Sbootmqf2 <- nlsBoot(Sfitmqf2)
Scfmqf2 <- cbind(Est=coef(Sfitmqf2),confint(Sbootmqf2))
## 3. Fit new Pauly et al. (1992) function
Psvmqf2 <- list(Linf=40,Kpr=0.6,t0=-1,ts=0.9,NGT=0.5)
Pfitmqf2 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf2,
start=Psvmqf2,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Pbootmqf2 <- nlsBoot(Pfitmqf2)
Pcfmqf2 <- cbind(Est=coef(Pfitmqf2),confint(Pbootmqf2))
## 4. Summary results
## Append AIC and RSS rows to each coefficient table
Tcfmqf2 <- rbind(Tcfmqf2,c(AIC(Tfitmqf2),NA,NA),c(deviance(Tfitmqf2),NA,NA))
Scfmqf2 <- rbind(Scfmqf2,c(AIC(Sfitmqf2),NA,NA),c(deviance(Sfitmqf2),NA,NA))
Pcfmqf2 <- rbind(Pcfmqf2,c(AIC(Pfitmqf2),NA,NA),c(deviance(Pfitmqf2),NA,NA))
rownames(Tcfmqf2)[4:5] <- rownames(Scfmqf2)[6:7] <- rownames(Pcfmqf2)[6:7] <- c("AIC","RSS")
print(round(Tcfmqf2,2),na.print="-")
print(round(Scfmqf2,2),na.print="-")
print(round(Pcfmqf2,2),na.print="-")
################################################################################
## Example analysis with the Mosquitofish data -- Site 4
################################################################################
mqf4 <- subset(Mosquitofish,sitenum==4)
## 1. Fit typical model (bounds Tlwrbnd/Tuprbnd defined in the Bonito section)
Tsvmqf4 <- list(Linf=45,K=1.2,t0=-0.2)
Tfitmqf4 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf4,
start=Tsvmqf4,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tbootmqf4 <- nlsBoot(Tfitmqf4)
Tcfmqf4 <- cbind(Est=coef(Tfitmqf4),confint(Tbootmqf4))
## 2. Fit Somers function with C unconstrained (as in Carmona-Catot et al. (2014))
Ssvmqf4 <- list(Linf=40,K=0.9,t0=-0.5,C=1.2,ts=0.9)
Sfitmqf4 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf4,
start=Ssvmqf4,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Sbootmqf4 <- nlsBoot(Sfitmqf4)
Scfmqf4 <- cbind(Est=coef(Sfitmqf4),confint(Sbootmqf4))
## 3. Fit new Pauly et al. (1992) function
Psvmqf4 <- list(Linf=40,Kpr=1.5,t0=0,ts=0.7,NGT=0.3)
Pfitmqf4 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf4,
start=Psvmqf4,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Pbootmqf4 <- nlsBoot(Pfitmqf4)
Pcfmqf4 <- cbind(Est=coef(Pfitmqf4),confint(Pbootmqf4))
## 4. Summary results
## Append AIC and RSS rows to each coefficient table
Tcfmqf4 <- rbind(Tcfmqf4,c(AIC(Tfitmqf4),NA,NA),c(deviance(Tfitmqf4),NA,NA))
Scfmqf4 <- rbind(Scfmqf4,c(AIC(Sfitmqf4),NA,NA),c(deviance(Sfitmqf4),NA,NA))
Pcfmqf4 <- rbind(Pcfmqf4,c(AIC(Pfitmqf4),NA,NA),c(deviance(Pfitmqf4),NA,NA))
rownames(Tcfmqf4)[4:5] <- rownames(Scfmqf4)[6:7] <- rownames(Pcfmqf4)[6:7] <- c("AIC","RSS")
print(round(Tcfmqf4,2),na.print="-")
print(round(Scfmqf4,2),na.print="-")
print(round(Pcfmqf4,2),na.print="-")
################################################################################
## Example analysis with the Mosquitofish data -- Site 9
################################################################################
mqf9 <- subset(Mosquitofish,sitenum==9)
## 1. Fit typical model (bounds Tlwrbnd/Tuprbnd defined in the Bonito section)
Tsvmqf9 <- list(Linf=40,K=1.5,t0=0)
Tfitmqf9 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf9,
start=Tsvmqf9,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tbootmqf9 <- nlsBoot(Tfitmqf9)
Tcfmqf9 <- cbind(Est=coef(Tfitmqf9),confint(Tbootmqf9))
## 2. Fit Somers function with C unconstrained (as in Carmona-Catot et al. (2014))
Ssvmqf9 <- list(Linf=40,K=1.2,t0=-0.2,C=0.6,ts=0.85)
Sfitmqf9 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf9,
start=Ssvmqf9,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Sbootmqf9 <- nlsBoot(Sfitmqf9)
Scfmqf9 <- cbind(Est=coef(Sfitmqf9),confint(Sbootmqf9))
## 3. Fit new Pauly et al. (1992) function
Psvmqf9 <- list(Linf=40,Kpr=1,t0=-0.2,ts=0.6,NGT=0.05)
Pfitmqf9 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf9,
start=Psvmqf9,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Pbootmqf9 <- nlsBoot(Pfitmqf9)
Pcfmqf9 <- cbind(Est=coef(Pfitmqf9),confint(Pbootmqf9))
## 4. Summary results
## Append AIC and RSS rows to each coefficient table
Tcfmqf9 <- rbind(Tcfmqf9,c(AIC(Tfitmqf9),NA,NA),c(deviance(Tfitmqf9),NA,NA))
Scfmqf9 <- rbind(Scfmqf9,c(AIC(Sfitmqf9),NA,NA),c(deviance(Sfitmqf9),NA,NA))
Pcfmqf9 <- rbind(Pcfmqf9,c(AIC(Pfitmqf9),NA,NA),c(deviance(Pfitmqf9),NA,NA))
## BUG FIX: was rownames(Tcfmqf4)[4:5] (copy-paste from the Site 4 section),
## which relabeled the Site 4 table again and left the Site 9 rows unlabeled
rownames(Tcfmqf9)[4:5] <- rownames(Scfmqf9)[6:7] <- rownames(Pcfmqf9)[6:7] <- c("AIC","RSS")
print(round(Tcfmqf9,2),na.print="-")
print(round(Scfmqf9,2),na.print="-")
print(round(Pcfmqf9,2),na.print="-")
################################################################################
################################################################################
## Create Figures
################################################################################
################################################################################
## Demo of the Somers model
pdf("results/Figure_1.PDF",width=4,height=4)
par(xaxs="i",yaxs="i",mar=c(3,3,0.6,0.6),mgp=c(1.7,.4,0),tcl=-0.2,las=1,cex=0.9)
## Common parameters; C varies across the four curves (C=0 is the typical VBGF)
ts <- 0.05; Linf <- 30; K <- 0.3; t0 <- -0.1
C <- c(0,0.5,1,2)
## Increasingly transparent black for the four C values
clr <- col2rgbt(rep("black",4),1/(1:4))
t <- seq(0,3,length.out=299)
plot(vbSO(t,Linf=c(Linf,K,t0,C=C[1],ts))~t,type="l",lwd=2,col=clr[1],
ylim=c(0,20),ylab="Mean Length",xlab="Age (years)",xaxt="n")
axis(1,0:3)
lines(vbSO(t,Linf=c(Linf,K,t0,C=C[2],ts))~t,lwd=2,col=clr[2])
lines(vbSO(t,Linf=c(Linf,K,t0,C=C[3],ts))~t,lwd=2,col=clr[3])
lines(vbSO(t,Linf=c(Linf,K,t0,C=C[4],ts))~t,lwd=2,col=clr[4])
legend("topleft",paste("C",C,sep="="),lwd=2,col=clr,bty="n",inset=-0.02)
dev.off()
## Demo of the Pauly et al. (1992) function
pdf("results/Figure_2.PDF",width=4,height=4)
par(xaxs="i",yaxs="i",mar=c(3,3,0.6,0.6),mgp=c(1.7,.4,0),tcl=-0.2,las=1,cex=0.9)
Kpr <- 0.35; NGT <- 0.3
Pcf <- c(Linf,Kpr,t0,ts,NGT)
t <- seq(-0.1,3.3,length.out=499)
PL <- vbPA(t,Linf=Pcf)
## Empty plot; curve, no-growth shading, and annotations are added below
plot(c(-1,-1),xlim=c(-0.1,3.5),ylim=c(0,22),xaxt="n",
ylab="Length",xlab="Age (years)")
## Midpoints of the no-growth periods and the lengths reached there
## (presumably "winter points" half a year after ts -- see manuscript)
WPs <- 0:2+ts+0.5
LatWPs <- vbPA(WPs,Linf=Pcf)
SNGTs <- WPs-NGT/2
ENGTs <- WPs+NGT/2
## Shade each no-growth window under the growth curve
for (i in 1:length(SNGTs))
polygon(c(SNGTs[i],SNGTs[i],ENGTs[i],ENGTs[i]),c(0,LatWPs[i],LatWPs[i],0),
col=col2rgbt("black",1/20),border=NA)
arrows(WPs,LatWPs-2,WPs,LatWPs-0.2,lwd=2,length=0.1)
arrows(SNGTs,LatWPs-1.2,ENGTs,LatWPs-1.2,lwd=2,length=0.025,angle=90,code=3)
lines(PL~t,lwd=2)
tss <- 0:3+ts
Lattss <- vbPA(tss,Linf=Pcf)
points(tss,Lattss,pch=16,col="gray50",cex=1.1)
axis(1,0:3)
axis(1,SNGTs,tcl=-0.2)
axis(1,ENGTs,tcl=-0.2)
# makes inside ticks
axis(1,at=c(0:3,SNGTs,ENGTs),labels=NA,tcl=0.2)
## Second (inner) axis shows ages on the t-prime scale
axis(1,at=1:3,labels=FSA:::iCalc_tpr(1:3,ts,NGT),tcl=0.2,line=-1.7,lwd=0)
axis(1,at=SNGTs,labels=FSA:::iCalc_tpr(SNGTs,ts,NGT),tcl=0.2,line=-1.7,lwd=0)
axis(1,at=ENGTs,labels=FSA:::iCalc_tpr(ENGTs,ts,NGT),tcl=0.2,line=-1.7,lwd=0)
axis(1,at=3.3,labels="<= t",tick=FALSE)
axis(1,at=3.3,labels="<= t'",tick=FALSE,line=-1.7,lwd=0)
dev.off()
## Summary of model fits
## Four panels: A = Bonito, B/C/D = Mosquitofish sites 2/4/9; dashed gray =
## typical VBGF, thick black = Pauly, thin gray = Somers
pdf("results/Figure_3.PDF",width=8,height=8)
par(mfrow=c(2,2),xaxs="i",yaxs="i",mar=c(3,3,0.6,0.6),mgp=c(1.7,.4,0),tcl=-0.2,las=1)
plot(fl~age,data=Bonito,pch=19,col=col2rgbt("black",1/4),
xlab="Age (years)",ylab="Fork Length (mm)",xlim=c(0,4),ylim=c(0,70))
curve(vbTyp(x,coef(TfitBon)),from=0,to=4,lwd=4,add=TRUE,col="gray25",lty=2)
curve(vbPA(x,coef(PfitBon)),from=0,to=4,lwd=4,add=TRUE)
curve(vbSO(x,coef(SfitBon)),from=0,to=4,lwd=2,add=TRUE,col="gray50")
text(grconvertX(0.08,"npc","user"),grconvertY(0.92,"npc","user"),"A",cex=1.5)
plot(sl~age2,data=mqf2,pch=19,col=col2rgbt("black",1/6),xaxt="n",
xlab="Age (years)",ylab="Standard Length (mm)",xlim=c(0,2.4),ylim=c(0,50))
axis(1,0:2)
curve(vbTyp(x,coef(Tfitmqf2)),from=0,to=3,lwd=4,add=TRUE,col="gray25",lty=2)
curve(vbPA(x,coef(Pfitmqf2)),from=0,to=3,lwd=4,add=TRUE)
curve(vbSO(x,coef(Sfitmqf2)),from=0,to=3,lwd=2,add=TRUE,col="gray50")
text(grconvertX(0.08,"npc","user"),grconvertY(0.92,"npc","user"),"B",cex=1.5)
plot(sl~age2,data=mqf4,pch=19,col=col2rgbt("black",1/6),xaxt="n",
xlab="Age (years)",ylab="Standard Length (mm)",xlim=c(0,2.4),ylim=c(0,50))
axis(1,0:2)
curve(vbTyp(x,coef(Tfitmqf4)),from=0,to=3,lwd=4,add=TRUE,col="gray25",lty=2)
curve(vbPA(x,coef(Pfitmqf4)),from=0,to=3,lwd=4,add=TRUE)
curve(vbSO(x,coef(Sfitmqf4)),from=0,to=3,lwd=2,add=TRUE,col="gray50")
text(grconvertX(0.08,"npc","user"),grconvertY(0.92,"npc","user"),"C",cex=1.5)
plot(sl~age2,data=mqf9,pch=19,col=col2rgbt("black",1/6),xaxt="n",
xlab="Age (years)",ylab="Standard Length (mm)",xlim=c(0,2.4),ylim=c(0,50))
axis(1,0:2)
curve(vbTyp(x,coef(Tfitmqf9)),from=0,to=3,lwd=4,add=TRUE,col="gray25",lty=2)
curve(vbPA(x,coef(Pfitmqf9)),from=0,to=3,lwd=4,add=TRUE)
curve(vbSO(x,coef(Sfitmqf9)),from=0,to=3,lwd=2,add=TRUE,col="gray50")
text(grconvertX(0.08,"npc","user"),grconvertY(0.92,"npc","user"),"D",cex=1.5)
dev.off()
################################################################################
################################################################################
## Testing different starting values for model fits ... per reviewer request
## Just checking for convergence and relationship to parameter estimates
## from the fits above.
##
## Some of these models failed to converge on a 64-bit Mac (El Capitan OS). As
## this code is used only to demonstrate that a global minimum was found, I
## did not work to find starting values that led to convergence on all OS. In
## my testing, the PfitBon1 and Pfitmqf42 models did not converge on that Mac.
##
################################################################################
################################################################################
# Bonito -- refit the typical VBGF from three alternative starting-value sets
# to check that the original fit (TfitBon) found a global minimum
TsvBon1 <- list(Linf=60,K=0.3,t0=0)
TfitBon1 <- nls(fl~vbTyp(age,Linf,K,t0),data=Bonito,
start=TsvBon1,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
TsvBon2 <- list(Linf=90,K=0.6,t0=0)
TfitBon2 <- nls(fl~vbTyp(age,Linf,K,t0),data=Bonito,
start=TsvBon2,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
TsvBon3 <- list(Linf=80,K=0.1,t0=-2)
TfitBon3 <- nls(fl~vbTyp(age,Linf,K,t0),data=Bonito,
start=TsvBon3,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
cbind(TsvBon,TsvBon1,TsvBon2,TsvBon3) # The starting values
# BUG FIX: last column was coef(TfitBon2) repeated; TfitBon3 was never compared
round(cbind(coef(TfitBon),coef(TfitBon1),coef(TfitBon2),
coef(TfitBon3)),4) # Parameter estimates look good (i.e., comparable)
## Somers fits from three alternative starting-value sets (same bounds as above)
SsvBon1 <- list(Linf=60,K=0.3,t0=-2,C=0.3,ts=0.1)
SfitBon1 <- nls(fl~vbSO(age,Linf,K,t0,C,ts),data=Bonito,
start=SsvBon1,lower=Slwrbnd,upper=SuprbndB,
algorithm="port",control=ctrl)
SsvBon2 <- list(Linf=40,K=0.1,t0=0,C=0.7,ts=0.3)
SfitBon2 <- nls(fl~vbSO(age,Linf,K,t0,C,ts),data=Bonito,
start=SsvBon2,lower=Slwrbnd,upper=SuprbndB,
algorithm="port",control=ctrl)
SsvBon3 <- list(Linf=60,K=0.5,t0=0,C=0.5,ts=0.5)
SfitBon3 <- nls(fl~vbSO(age,Linf,K,t0,C,ts),data=Bonito,
start=SsvBon3,lower=Slwrbnd,upper=SuprbndB,
algorithm="port",control=ctrl)
cbind(SsvBon,SsvBon1,SsvBon2,SsvBon3) # The starting values
round(cbind(coef(SfitBon),coef(SfitBon1),coef(SfitBon2),
coef(SfitBon3)),4) # Parameter estimates look good (i.e., comparable)
## Pauly et al. fits from three alternative starting-value sets
PsvBon1 <- list(Linf=60,Kpr=0.2,t0=0,ts=0.1,NGT=0.1)
PfitBon1 <- nls(fl~vbPA(age,Linf,Kpr,t0,ts,NGT),data=Bonito,
start=PsvBon1,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
PsvBon2 <- list(Linf=50,Kpr=0.7,t0=-1,ts=0.3,NGT=0.5)
PfitBon2 <- nls(fl~vbPA(age,Linf,Kpr,t0,ts,NGT),data=Bonito,
start=PsvBon2,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
PsvBon3 <- list(Linf=70,Kpr=0.5,t0=-1,ts=0.5,NGT=0.5)
PfitBon3 <- nls(fl~vbPA(age,Linf,Kpr,t0,ts,NGT),data=Bonito,
start=PsvBon3,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
cbind(PsvBon,PsvBon1,PsvBon2,PsvBon3) # Starting values
round(cbind(coef(PfitBon),coef(PfitBon1),coef(PfitBon2),
coef(PfitBon3)),4) # Parameter estimates look good (i.e., comparable)
# Mosquitofish Site 2 -- refit the typical VBGF from three alternative
# starting-value sets to check for a global minimum
Tsvmqf21 <- list(Linf=60,K=0.3,t0=0)
Tfitmqf21 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf2,
start=Tsvmqf21,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tsvmqf22 <- list(Linf=90,K=0.6,t0=0)
Tfitmqf22 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf2,
start=Tsvmqf22,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tsvmqf23 <- list(Linf=80,K=0.1,t0=-2)
Tfitmqf23 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf2,
start=Tsvmqf23,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
cbind(Tsvmqf2,Tsvmqf21,Tsvmqf22,Tsvmqf23) # The starting values
# BUG FIX: last column was coef(Tfitmqf22) repeated; Tfitmqf23 was never compared
round(cbind(coef(Tfitmqf2),coef(Tfitmqf21),coef(Tfitmqf22),
coef(Tfitmqf23)),4) # Parameter estimates look good (i.e., comparable)
## Somers fits from three alternative starting-value sets
Ssvmqf21 <- list(Linf=60,K=0.3,t0=-2,C=1,ts=0.9)
Sfitmqf21 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf2,
start=Ssvmqf21,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Ssvmqf22 <- list(Linf=40,K=0.1,t0=0,C=1.9,ts=0.7)
Sfitmqf22 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf2,
start=Ssvmqf22,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Ssvmqf23 <- list(Linf=40,K=0.5,t0=0,C=1.4,ts=0.7)
Sfitmqf23 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf2,
start=Ssvmqf23,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
cbind(Ssvmqf2,Ssvmqf21,Ssvmqf22,Ssvmqf23) # The starting values
round(cbind(coef(Sfitmqf2),coef(Sfitmqf21),coef(Sfitmqf22),
coef(Sfitmqf23)),4) # Parameter estimates look good (i.e., comparable)
# Some starting values did produce different estimates, but fit was worse. e.g.,
# Ssvmqf2Z <- list(Linf=60,K=0.5,t0=-2,C=1.3,ts=0.5)
# Sfitmqf2Z <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf2,start=Ssvmqf2Z,lower=Slwrbnd,upper=SuprbndM,algorithm="port",control=ctrl)
# coef(Sfitmqf2Z)
# deviance(Sfitmqf2Z)
## Pauly et al. fits from three alternative starting-value sets
Psvmqf21 <- list(Linf=60,Kpr=0.2,t0=0,ts=0.7,NGT=0.1)
Pfitmqf21 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf2,
start=Psvmqf21,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Psvmqf22 <- list(Linf=40,Kpr=0.4,t0=-1,ts=0.7,NGT=0.5)
Pfitmqf22 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf2,
start=Psvmqf22,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Psvmqf23 <- list(Linf=60,Kpr=0.5,t0=0,ts=0.5,NGT=0.3)
Pfitmqf23 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf2,
start=Psvmqf23,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
cbind(Psvmqf2,Psvmqf21,Psvmqf22,Psvmqf23) # Starting values
round(cbind(coef(Pfitmqf2),coef(Pfitmqf21),coef(Pfitmqf22),
coef(Pfitmqf23)),4) # Parameter estimates look good (i.e., comparable)
# Mosquitofish Site 4 -- refit the typical VBGF from three alternative
# starting-value sets to check for a global minimum
Tsvmqf41 <- list(Linf=60,K=0.3,t0=0)
# BUG FIX: fit used start=Tsvmqf4 (the original start), so Tsvmqf41 was unused
Tfitmqf41 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf4,
start=Tsvmqf41,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tsvmqf42 <- list(Linf=90,K=0.6,t0=0)
Tfitmqf42 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf4,
start=Tsvmqf42,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tsvmqf43 <- list(Linf=80,K=0.1,t0=-2)
Tfitmqf43 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf4,
start=Tsvmqf43,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
cbind(Tsvmqf4,Tsvmqf41,Tsvmqf42,Tsvmqf43) # The starting values
# BUG FIX: last column was coef(Tfitmqf42) repeated; Tfitmqf43 was never compared
round(cbind(coef(Tfitmqf4),coef(Tfitmqf41),coef(Tfitmqf42),
coef(Tfitmqf43)),4) # Parameter estimates look good (i.e., comparable)
## Somers fits from three alternative starting-value sets
Ssvmqf41 <- list(Linf=60,K=0.3,t0=0,C=1,ts=0.9)
Sfitmqf41 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf4,
start=Ssvmqf41,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Ssvmqf42 <- list(Linf=60,K=0.9,t0=0,C=2,ts=0.8)
Sfitmqf42 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf4,
start=Ssvmqf42,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Ssvmqf43 <- list(Linf=40,K=0.9,t0=0,C=1,ts=0.8)
Sfitmqf43 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf4,
start=Ssvmqf43,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
cbind(Ssvmqf4,Ssvmqf41,Ssvmqf42,Ssvmqf43) # The starting values
round(cbind(coef(Sfitmqf4),coef(Sfitmqf41),coef(Sfitmqf42),
coef(Sfitmqf43)),4) # Parameter estimates look good (i.e., comparable)
# Seems very sensitive to choice of ts (in terms of convergence)
## Pauly et al. fits from three alternative starting-value sets
Psvmqf41 <- list(Linf=60,Kpr=0.4,t0=-2,ts=0.7,NGT=0.5)
Pfitmqf41 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf4,
start=Psvmqf41,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Psvmqf42 <- list(Linf=40,Kpr=0.3,t0=0,ts=0.9,NGT=0.7)
Pfitmqf42 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf4,
start=Psvmqf42,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Psvmqf43 <- list(Linf=50,Kpr=0.2,t0=-2,ts=0.7,NGT=0.7)
Pfitmqf43 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf4,
start=Psvmqf43,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
cbind(Psvmqf4,Psvmqf41,Psvmqf42,Psvmqf43) # Starting values
round(cbind(coef(Pfitmqf4),coef(Pfitmqf41),coef(Pfitmqf42),
coef(Pfitmqf43)),4) # Parameter estimates look good (i.e., comparable)
# Mosquitofish Site 9 -- refit the typical VBGF from three alternative
# starting-value sets to check for a global minimum
Tsvmqf91 <- list(Linf=60,K=0.3,t0=0)
# BUG FIX: fit used start=Tsvmqf9 (the original start), so Tsvmqf91 was unused
Tfitmqf91 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf9,
start=Tsvmqf91,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tsvmqf92 <- list(Linf=90,K=0.6,t0=0)
Tfitmqf92 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf9,
start=Tsvmqf92,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tsvmqf93 <- list(Linf=80,K=0.1,t0=-2)
Tfitmqf93 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf9,
start=Tsvmqf93,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
cbind(Tsvmqf9,Tsvmqf91,Tsvmqf92,Tsvmqf93) # The starting values
# BUG FIX: last column was coef(Tfitmqf92) repeated; Tfitmqf93 was never compared
round(cbind(coef(Tfitmqf9),coef(Tfitmqf91),coef(Tfitmqf92),
coef(Tfitmqf93)),4) # Parameter estimates look good (i.e., comparable)
## Sensitivity check for the Site-9 Somers fit (Sfitmqf9): refit from three
## alternative starting-value sets and compare the estimates.
Ssvmqf91 <- list(Linf=60,K=0.3,t0=-1,C=0.4,ts=0.8)
Sfitmqf91 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf9,
start=Ssvmqf91,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Ssvmqf92 <- list(Linf=30,K=0.1,t0=-1,C=0.4,ts=0.9)
Sfitmqf92 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf9,
start=Ssvmqf92,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Ssvmqf93 <- list(Linf=60,K=0.2,t0=-1,C=0.2,ts=0.6)
Sfitmqf93 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf9,
start=Ssvmqf93,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
cbind(Ssvmqf9,Ssvmqf91,Ssvmqf92,Ssvmqf93) # The starting values
round(cbind(coef(Sfitmqf9),coef(Sfitmqf91),coef(Sfitmqf92),
coef(Sfitmqf93)),4) # Parameter estimates look good (i.e., comparable)
# Difficult time finding starting values that led to convergence
## Same check for the Site-9 Pauly et al. (1992) fit (Pfitmqf9).
Psvmqf91 <- list(Linf=40,Kpr=0.1,t0=0,ts=0.65,NGT=0.25)
Pfitmqf91 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf9,
start=Psvmqf91,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Psvmqf92 <- list(Linf=60,Kpr=0.3,t0=-1,ts=0.95,NGT=0.5)
Pfitmqf92 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf9,
start=Psvmqf92,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Psvmqf93 <- list(Linf=40,Kpr=0.2,t0=-1,ts=0.5,NGT=0.5)
Pfitmqf93 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf9,
start=Psvmqf93,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
cbind(Psvmqf9,Psvmqf91,Psvmqf92,Psvmqf93) # Starting values
round(cbind(coef(Pfitmqf9),coef(Pfitmqf91),coef(Pfitmqf92),
coef(Pfitmqf93)),4) # Parameter estimates look good (i.e., comparable)
# Some starting values produced different estimates, but fit was worse. e.g.,
# Psvmqf9Z <- list(Linf=60,Kpr=0.3,t0=0,ts=0.95,NGT=0.5)
# Pfitmqf9Z <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf9,start=Psvmqf9Z,lower=Plwrbnd,upper=Puprbnd,algorithm="port",control=ctrl)
# coef(Pfitmqf9Z)
# deviance(Pfitmqf9Z)
|
/resources/pubs/SeasonalGrowth_Analysis.R
|
permissive
|
treymaddaleno/droglenc.github.io
|
R
| false
| false
| 25,912
|
r
|
################################################################################
################################################################################
##
## Analysis script for ... Ogle, DH. 201X. An Algorithm for the von Bertalanffy
## Seasonal Cessation in Growth Function of Pauly et al. (1992). Fisheries
## Research XX:XXX-XXX.
##
## Need to be patient with bootstrapping functions. May also need to create a
## directory called "results" in your current working directory to hold the
## figures produced by pdf() (or not run the pdf() and dev.off() functions to
## simply produce the figures on the local device). The following line (run
## in R) creates the directory (assumes that you have set your working
## directory to the same location as this script) ...
##
## dir.create("results")
##
## This code was tested on a Windows 7 machine using 32-bit R v3.3.1 and a
## Macintosh (El Capitan OS) machine using 64-bit R v3.3.1. The code runs
## without error on both machines, though there are several warnings related
## to model convergence during the bootstrapping procedures.
##
################################################################################
################################################################################
################################################################################
## SETUP
##
## Requires FSA (>=0.8.8) and FSAdata (>=0.3.3) from CRAN, installed with:
##
## install.packages("FSA")
## install.packages("FSAdata")
##
################################################################################
## Load required packages
library(FSAdata) # for Bonito and Mosquitofish data
library(FSA) # for the Somers and Pauly et al. growth functions (vbFuns)
library(nlstools) # for nls model bootstrapping
## Create a function for the Typical VBGF
vbTyp <- vbFuns("Typical")
## Create a function for the Somers VBGF
vbSO <- vbFuns("Somers")
## Create a function for the Pauly et al. VBGF
## (the surrounding parentheses also print the function body)
( vbPA <- vbFuns("Pauly") )
## Note that vbPA uses an internal function for computing t-prime. The next line
## displays this function (Step comments correspond to steps in the manuscript).
FSA:::iCalc_tpr
## Increase the maximum number of iterations for convergence in nls()
ctrl <- nls.control(maxiter=200)
## Set the random number seed so that the bootstraps stay reproducible
set.seed(730987)
################################################################################
## Example analysis with the Bonito data
################################################################################
data(Bonito)
## 1. Fit typical model
## Box constraints keep the "port" algorithm in a sensible parameter space.
Tlwrbnd <- c(Linf=0,K=0,t0=-Inf)
Tuprbnd <- c(Linf=Inf,K=Inf,t0=Inf)
TsvBon <- list(Linf=60,K=0.4,t0=-2.4)
TfitBon <- nls(fl~vbTyp(age,Linf,K,t0),data=Bonito,
start=TsvBon,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
# Bootstrap the fit to get confidence intervals for the parameter estimates
TbootBon <- nlsBoot(TfitBon)
TcfBon <- cbind(Est=coef(TfitBon),confint(TbootBon))
## 2. Fit Somers function with C<=1 constraint (as in Stewart et al. (2013))
## Linf=71.9, K=0.27, t0=-1.92, C=1, and ts=0.09 ... all matched (w/in rounding)
Slwrbnd <- c(Linf=0,K=0,t0=-Inf,C=0,ts=0)
SuprbndB <- c(Linf=Inf,K=Inf,t0=Inf,C=1,ts=1) # note C capped at 1 here
SsvBon <- list(Linf=60,K=0.4,t0=-1.5,C=0.6,ts=0.2)
SfitBon <- nls(fl~vbSO(age,Linf,K,t0,C,ts),data=Bonito,
start=SsvBon,lower=Slwrbnd,upper=SuprbndB,
algorithm="port",control=ctrl)
SbootBon <- nlsBoot(SfitBon)
ScfBon <- cbind(Est=coef(SfitBon),confint(SbootBon))
## 3. Fit new Pauly et al. (1992) function
Plwrbnd <- c(Linf=0,Kpr=0,t0=-Inf,ts=0,NGT=0)
Puprbnd <- c(Linf=Inf,Kpr=Inf,t0=Inf,ts=1,NGT=1)
PsvBon <- list(Linf=60,Kpr=0.5,t0=1.3,ts=0.25,NGT=0.2)
PfitBon <- nls(fl~vbPA(age,Linf,Kpr,t0,ts,NGT),data=Bonito,
start=PsvBon,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
PbootBon <- nlsBoot(PfitBon)
PcfBon <- cbind(Est=coef(PfitBon),confint(PbootBon))
## 4. Summary results
## Append AIC and residual sum-of-squares as extra rows to each table
TcfBon <- rbind(TcfBon,c(AIC(TfitBon),NA,NA),c(deviance(TfitBon),NA,NA))
ScfBon <- rbind(ScfBon,c(AIC(SfitBon),NA,NA),c(deviance(SfitBon),NA,NA))
PcfBon <- rbind(PcfBon,c(AIC(PfitBon),NA,NA),c(deviance(PfitBon),NA,NA))
rownames(TcfBon)[4:5] <- rownames(ScfBon)[6:7] <- rownames(PcfBon)[6:7] <- c("AIC","RSS")
print(round(TcfBon,2),na.print="-")
print(round(ScfBon,2),na.print="-")
print(round(PcfBon,2),na.print="-")
################################################################################
## Example analysis with the Mosquitofish data -- Site 2
################################################################################
data(Mosquitofish)
mqf2 <- subset(Mosquitofish,sitenum==2)
## 1. Fit typical model
Tsvmqf2 <- list(Linf=40,K=0.6,t0=-1)
Tfitmqf2 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf2,
start=Tsvmqf2,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
# Bootstrap confidence intervals for the parameter estimates
Tbootmqf2 <- nlsBoot(Tfitmqf2)
Tcfmqf2 <- cbind(Est=coef(Tfitmqf2),confint(Tbootmqf2))
## 2. Fit Somers function with C unconstrained (as in Carmona-Catot et al. (2014))
## Linf=35.85, K=2.012, t0=-0.02, C=1.95, and ts=-0.118 ...
## they all match (within rounding) except ts but it is off by +1
# Unlike SuprbndB above, this upper bound leaves C unconstrained (C can be >1)
SuprbndM <- c(Linf=Inf,K=Inf,t0=Inf,C=Inf,ts=1)
Ssvmqf2 <- list(Linf=40,K=0.5,t0=-1,C=1.5,ts=0.9)
Sfitmqf2 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf2,
start=Ssvmqf2,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Sbootmqf2 <- nlsBoot(Sfitmqf2)
Scfmqf2 <- cbind(Est=coef(Sfitmqf2),confint(Sbootmqf2))
## 3. Fit new Pauly et al. (1992) function
Psvmqf2 <- list(Linf=40,Kpr=0.6,t0=-1,ts=0.9,NGT=0.5)
Pfitmqf2 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf2,
start=Psvmqf2,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Pbootmqf2 <- nlsBoot(Pfitmqf2)
Pcfmqf2 <- cbind(Est=coef(Pfitmqf2),confint(Pbootmqf2))
## 4. Summary results
## Append AIC and residual sum-of-squares as extra rows to each table
Tcfmqf2 <- rbind(Tcfmqf2,c(AIC(Tfitmqf2),NA,NA),c(deviance(Tfitmqf2),NA,NA))
Scfmqf2 <- rbind(Scfmqf2,c(AIC(Sfitmqf2),NA,NA),c(deviance(Sfitmqf2),NA,NA))
Pcfmqf2 <- rbind(Pcfmqf2,c(AIC(Pfitmqf2),NA,NA),c(deviance(Pfitmqf2),NA,NA))
rownames(Tcfmqf2)[4:5] <- rownames(Scfmqf2)[6:7] <- rownames(Pcfmqf2)[6:7] <- c("AIC","RSS")
print(round(Tcfmqf2,2),na.print="-")
print(round(Scfmqf2,2),na.print="-")
print(round(Pcfmqf2,2),na.print="-")
################################################################################
## Example analysis with the Mosquitofish data -- Site 4
################################################################################
mqf4 <- subset(Mosquitofish,sitenum==4)
## 1. Fit typical model
Tsvmqf4 <- list(Linf=45,K=1.2,t0=-0.2)
Tfitmqf4 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf4,
start=Tsvmqf4,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
# Bootstrap confidence intervals for the parameter estimates
Tbootmqf4 <- nlsBoot(Tfitmqf4)
Tcfmqf4 <- cbind(Est=coef(Tfitmqf4),confint(Tbootmqf4))
## 2. Fit Somers function with C unconstrained (as in Carmona-Catot et al. (2014))
Ssvmqf4 <- list(Linf=40,K=0.9,t0=-0.5,C=1.2,ts=0.9)
Sfitmqf4 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf4,
start=Ssvmqf4,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Sbootmqf4 <- nlsBoot(Sfitmqf4)
Scfmqf4 <- cbind(Est=coef(Sfitmqf4),confint(Sbootmqf4))
## 3. Fit new Pauly et al. (1992) function
Psvmqf4 <- list(Linf=40,Kpr=1.5,t0=0,ts=0.7,NGT=0.3)
Pfitmqf4 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf4,
start=Psvmqf4,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Pbootmqf4 <- nlsBoot(Pfitmqf4)
Pcfmqf4 <- cbind(Est=coef(Pfitmqf4),confint(Pbootmqf4))
## 4. Summary results
## Append AIC and residual sum-of-squares as extra rows to each table
Tcfmqf4 <- rbind(Tcfmqf4,c(AIC(Tfitmqf4),NA,NA),c(deviance(Tfitmqf4),NA,NA))
Scfmqf4 <- rbind(Scfmqf4,c(AIC(Sfitmqf4),NA,NA),c(deviance(Sfitmqf4),NA,NA))
Pcfmqf4 <- rbind(Pcfmqf4,c(AIC(Pfitmqf4),NA,NA),c(deviance(Pfitmqf4),NA,NA))
rownames(Tcfmqf4)[4:5] <- rownames(Scfmqf4)[6:7] <- rownames(Pcfmqf4)[6:7] <- c("AIC","RSS")
print(round(Tcfmqf4,2),na.print="-")
print(round(Scfmqf4,2),na.print="-")
print(round(Pcfmqf4,2),na.print="-")
################################################################################
## Example analysis with the Mosquitofish data -- Site 9
################################################################################
mqf9 <- subset(Mosquitofish,sitenum==9)
## 1. Fit typical model
Tsvmqf9 <- list(Linf=40,K=1.5,t0=0)
Tfitmqf9 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf9,
start=Tsvmqf9,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
# Bootstrap confidence intervals for the parameter estimates
Tbootmqf9 <- nlsBoot(Tfitmqf9)
Tcfmqf9 <- cbind(Est=coef(Tfitmqf9),confint(Tbootmqf9))
## 2. Fit Somers function with C unconstrained (as in Carmona-Catot et al. (2014))
Ssvmqf9 <- list(Linf=40,K=1.2,t0=-0.2,C=0.6,ts=0.85)
Sfitmqf9 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf9,
start=Ssvmqf9,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Sbootmqf9 <- nlsBoot(Sfitmqf9)
Scfmqf9 <- cbind(Est=coef(Sfitmqf9),confint(Sbootmqf9))
## 3. Fit new Pauly et al. (1992) function
Psvmqf9 <- list(Linf=40,Kpr=1,t0=-0.2,ts=0.6,NGT=0.05)
Pfitmqf9 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf9,
start=Psvmqf9,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Pbootmqf9 <- nlsBoot(Pfitmqf9)
Pcfmqf9 <- cbind(Est=coef(Pfitmqf9),confint(Pbootmqf9))
## 4. Summary results
## Append AIC and residual sum-of-squares as extra rows to each table
Tcfmqf9 <- rbind(Tcfmqf9,c(AIC(Tfitmqf9),NA,NA),c(deviance(Tfitmqf9),NA,NA))
Scfmqf9 <- rbind(Scfmqf9,c(AIC(Sfitmqf9),NA,NA),c(deviance(Sfitmqf9),NA,NA))
Pcfmqf9 <- rbind(Pcfmqf9,c(AIC(Pfitmqf9),NA,NA),c(deviance(Pfitmqf9),NA,NA))
# BUG FIX: this line previously labeled rownames(Tcfmqf4) (the Site-4 table),
# leaving the Site-9 Typical table's AIC/RSS rows unnamed.
rownames(Tcfmqf9)[4:5] <- rownames(Scfmqf9)[6:7] <- rownames(Pcfmqf9)[6:7] <- c("AIC","RSS")
print(round(Tcfmqf9,2),na.print="-")
print(round(Scfmqf9,2),na.print="-")
print(round(Pcfmqf9,2),na.print="-")
################################################################################
################################################################################
## Create Figures
################################################################################
################################################################################
## Demo of the Somers model
## Figure 1: Somers seasonal VBGF predicted lengths for four values of the
## oscillation amplitude C (C=0 reduces to the typical VBGF).
pdf("results/Figure_1.PDF",width=4,height=4)
par(xaxs="i",yaxs="i",mar=c(3,3,0.6,0.6),mgp=c(1.7,.4,0),tcl=-0.2,las=1,cex=0.9)
ts <- 0.05; Linf <- 30; K <- 0.3; t0 <- -0.1
C <- c(0,0.5,1,2)
# Increasingly transparent blacks distinguish the four curves
clr <- col2rgbt(rep("black",4),1/(1:4))
t <- seq(0,3,length.out=299)
# Note: the full parameter vector is passed via the first (Linf) argument --
# a convention of vbFuns()-created functions (also used with coef() below).
plot(vbSO(t,Linf=c(Linf,K,t0,C=C[1],ts))~t,type="l",lwd=2,col=clr[1],
ylim=c(0,20),ylab="Mean Length",xlab="Age (years)",xaxt="n")
axis(1,0:3)
lines(vbSO(t,Linf=c(Linf,K,t0,C=C[2],ts))~t,lwd=2,col=clr[2])
lines(vbSO(t,Linf=c(Linf,K,t0,C=C[3],ts))~t,lwd=2,col=clr[3])
lines(vbSO(t,Linf=c(Linf,K,t0,C=C[4],ts))~t,lwd=2,col=clr[4])
legend("topleft",paste("C",C,sep="="),lwd=2,col=clr,bty="n",inset=-0.02)
dev.off()
## Demo of the Pauly et al. (1992) function
## Figure 2: growth curve with shaded no-growth periods (NGT) and a second
## axis showing the adjusted ages (t-prime) from FSA:::iCalc_tpr.
pdf("results/Figure_2.PDF",width=4,height=4)
par(xaxs="i",yaxs="i",mar=c(3,3,0.6,0.6),mgp=c(1.7,.4,0),tcl=-0.2,las=1,cex=0.9)
Kpr <- 0.35; NGT <- 0.3
Pcf <- c(Linf,Kpr,t0,ts,NGT) # reuses Linf, t0, ts from the Figure 1 demo
t <- seq(-0.1,3.3,length.out=499)
PL <- vbPA(t,Linf=Pcf)
# Empty plot region; points/lines are added below
plot(c(-1,-1),xlim=c(-0.1,3.5),ylim=c(0,22),xaxt="n",
ylab="Length",xlab="Age (years)")
WPs <- 0:2+ts+0.5 # "winter points" -- midpoints of each no-growth period
LatWPs <- vbPA(WPs,Linf=Pcf)
SNGTs <- WPs-NGT/2 # start of each no-growth period
ENGTs <- WPs+NGT/2 # end of each no-growth period
# Shade each no-growth period up to the length at that winter point
for (i in 1:length(SNGTs))
polygon(c(SNGTs[i],SNGTs[i],ENGTs[i],ENGTs[i]),c(0,LatWPs[i],LatWPs[i],0),
col=col2rgbt("black",1/20),border=NA)
arrows(WPs,LatWPs-2,WPs,LatWPs-0.2,lwd=2,length=0.1)
arrows(SNGTs,LatWPs-1.2,ENGTs,LatWPs-1.2,lwd=2,length=0.025,angle=90,code=3)
lines(PL~t,lwd=2)
tss <- 0:3+ts
Lattss <- vbPA(tss,Linf=Pcf)
points(tss,Lattss,pch=16,col="gray50",cex=1.1)
axis(1,0:3)
axis(1,SNGTs,tcl=-0.2)
axis(1,ENGTs,tcl=-0.2)
# makes inside ticks
axis(1,at=c(0:3,SNGTs,ENGTs),labels=NA,tcl=0.2)
# Inner axis labels show the corresponding t-prime (adjusted age) values
axis(1,at=1:3,labels=FSA:::iCalc_tpr(1:3,ts,NGT),tcl=0.2,line=-1.7,lwd=0)
axis(1,at=SNGTs,labels=FSA:::iCalc_tpr(SNGTs,ts,NGT),tcl=0.2,line=-1.7,lwd=0)
axis(1,at=ENGTs,labels=FSA:::iCalc_tpr(ENGTs,ts,NGT),tcl=0.2,line=-1.7,lwd=0)
axis(1,at=3.3,labels="<= t",tick=FALSE)
axis(1,at=3.3,labels="<= t'",tick=FALSE,line=-1.7,lwd=0)
dev.off()
## Summary of model fits
## Figure 3: four panels (Bonito, Mosquitofish sites 2/4/9) overlaying the
## Typical (dashed), Pauly (thick solid), and Somers (thin gray) fitted curves.
pdf("results/Figure_3.PDF",width=8,height=8)
par(mfrow=c(2,2),xaxs="i",yaxs="i",mar=c(3,3,0.6,0.6),mgp=c(1.7,.4,0),tcl=-0.2,las=1)
plot(fl~age,data=Bonito,pch=19,col=col2rgbt("black",1/4),
xlab="Age (years)",ylab="Fork Length (mm)",xlim=c(0,4),ylim=c(0,70))
curve(vbTyp(x,coef(TfitBon)),from=0,to=4,lwd=4,add=TRUE,col="gray25",lty=2)
curve(vbPA(x,coef(PfitBon)),from=0,to=4,lwd=4,add=TRUE)
curve(vbSO(x,coef(SfitBon)),from=0,to=4,lwd=2,add=TRUE,col="gray50")
text(grconvertX(0.08,"npc","user"),grconvertY(0.92,"npc","user"),"A",cex=1.5)
plot(sl~age2,data=mqf2,pch=19,col=col2rgbt("black",1/6),xaxt="n",
xlab="Age (years)",ylab="Standard Length (mm)",xlim=c(0,2.4),ylim=c(0,50))
axis(1,0:2)
curve(vbTyp(x,coef(Tfitmqf2)),from=0,to=3,lwd=4,add=TRUE,col="gray25",lty=2)
curve(vbPA(x,coef(Pfitmqf2)),from=0,to=3,lwd=4,add=TRUE)
curve(vbSO(x,coef(Sfitmqf2)),from=0,to=3,lwd=2,add=TRUE,col="gray50")
text(grconvertX(0.08,"npc","user"),grconvertY(0.92,"npc","user"),"B",cex=1.5)
plot(sl~age2,data=mqf4,pch=19,col=col2rgbt("black",1/6),xaxt="n",
xlab="Age (years)",ylab="Standard Length (mm)",xlim=c(0,2.4),ylim=c(0,50))
axis(1,0:2)
curve(vbTyp(x,coef(Tfitmqf4)),from=0,to=3,lwd=4,add=TRUE,col="gray25",lty=2)
curve(vbPA(x,coef(Pfitmqf4)),from=0,to=3,lwd=4,add=TRUE)
curve(vbSO(x,coef(Sfitmqf4)),from=0,to=3,lwd=2,add=TRUE,col="gray50")
text(grconvertX(0.08,"npc","user"),grconvertY(0.92,"npc","user"),"C",cex=1.5)
plot(sl~age2,data=mqf9,pch=19,col=col2rgbt("black",1/6),xaxt="n",
xlab="Age (years)",ylab="Standard Length (mm)",xlim=c(0,2.4),ylim=c(0,50))
axis(1,0:2)
curve(vbTyp(x,coef(Tfitmqf9)),from=0,to=3,lwd=4,add=TRUE,col="gray25",lty=2)
curve(vbPA(x,coef(Pfitmqf9)),from=0,to=3,lwd=4,add=TRUE)
curve(vbSO(x,coef(Sfitmqf9)),from=0,to=3,lwd=2,add=TRUE,col="gray50")
text(grconvertX(0.08,"npc","user"),grconvertY(0.92,"npc","user"),"D",cex=1.5)
dev.off()
################################################################################
################################################################################
## Testing different starting values for model fits ... per reviewer request
## Just checking for convergence and relationship to parameter estimates
## from the fits above.
##
## Some of these models failed to converge on a 64-bit Mac (El Capitan OS). As
## this code is used only to demonstrate that a global minimum was found, I
## did not work to find starting values that led to convergence on all OS. In
## my testing, the PfitBon1 and Pfitmqf42 models did not converge on that Mac.
##
################################################################################
################################################################################
# Bonito
## Refit the three Bonito models from three alternative starting-value sets
## apiece; comparable estimates across fits suggest a global minimum was found.
TsvBon1 <- list(Linf=60,K=0.3,t0=0)
TfitBon1 <- nls(fl~vbTyp(age,Linf,K,t0),data=Bonito,
start=TsvBon1,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
TsvBon2 <- list(Linf=90,K=0.6,t0=0)
TfitBon2 <- nls(fl~vbTyp(age,Linf,K,t0),data=Bonito,
start=TsvBon2,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
TsvBon3 <- list(Linf=80,K=0.1,t0=-2)
TfitBon3 <- nls(fl~vbTyp(age,Linf,K,t0),data=Bonito,
start=TsvBon3,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
cbind(TsvBon,TsvBon1,TsvBon2,TsvBon3) # The starting values
# BUG FIX: the fourth column previously repeated coef(TfitBon2); show the
# TfitBon3 estimates instead.
round(cbind(coef(TfitBon),coef(TfitBon1),coef(TfitBon2),
coef(TfitBon3)),4) # Parameter estimates look good (i.e., comparable)
SsvBon1 <- list(Linf=60,K=0.3,t0=-2,C=0.3,ts=0.1)
SfitBon1 <- nls(fl~vbSO(age,Linf,K,t0,C,ts),data=Bonito,
start=SsvBon1,lower=Slwrbnd,upper=SuprbndB,
algorithm="port",control=ctrl)
SsvBon2 <- list(Linf=40,K=0.1,t0=0,C=0.7,ts=0.3)
SfitBon2 <- nls(fl~vbSO(age,Linf,K,t0,C,ts),data=Bonito,
start=SsvBon2,lower=Slwrbnd,upper=SuprbndB,
algorithm="port",control=ctrl)
SsvBon3 <- list(Linf=60,K=0.5,t0=0,C=0.5,ts=0.5)
SfitBon3 <- nls(fl~vbSO(age,Linf,K,t0,C,ts),data=Bonito,
start=SsvBon3,lower=Slwrbnd,upper=SuprbndB,
algorithm="port",control=ctrl)
cbind(SsvBon,SsvBon1,SsvBon2,SsvBon3) # The starting values
round(cbind(coef(SfitBon),coef(SfitBon1),coef(SfitBon2),
coef(SfitBon3)),4) # Parameter estimates look good (i.e., comparable)
PsvBon1 <- list(Linf=60,Kpr=0.2,t0=0,ts=0.1,NGT=0.1)
PfitBon1 <- nls(fl~vbPA(age,Linf,Kpr,t0,ts,NGT),data=Bonito,
start=PsvBon1,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
PsvBon2 <- list(Linf=50,Kpr=0.7,t0=-1,ts=0.3,NGT=0.5)
PfitBon2 <- nls(fl~vbPA(age,Linf,Kpr,t0,ts,NGT),data=Bonito,
start=PsvBon2,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
PsvBon3 <- list(Linf=70,Kpr=0.5,t0=-1,ts=0.5,NGT=0.5)
PfitBon3 <- nls(fl~vbPA(age,Linf,Kpr,t0,ts,NGT),data=Bonito,
start=PsvBon3,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
cbind(PsvBon,PsvBon1,PsvBon2,PsvBon3) # Starting values
round(cbind(coef(PfitBon),coef(PfitBon1),coef(PfitBon2),
coef(PfitBon3)),4) # Parameter estimates look good (i.e., comparable)
# Mosquitofish Site 2
## Refit the Site-2 models from alternative starting values to check that the
## original fits converged to a global minimum.
Tsvmqf21 <- list(Linf=60,K=0.3,t0=0)
Tfitmqf21 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf2,
start=Tsvmqf21,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tsvmqf22 <- list(Linf=90,K=0.6,t0=0)
Tfitmqf22 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf2,
start=Tsvmqf22,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tsvmqf23 <- list(Linf=80,K=0.1,t0=-2)
Tfitmqf23 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf2,
start=Tsvmqf23,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
cbind(Tsvmqf2,Tsvmqf21,Tsvmqf22,Tsvmqf23) # The starting values
# BUG FIX: the fourth column previously repeated coef(Tfitmqf22); show the
# Tfitmqf23 estimates instead.
round(cbind(coef(Tfitmqf2),coef(Tfitmqf21),coef(Tfitmqf22),
coef(Tfitmqf23)),4) # Parameter estimates look good (i.e., comparable)
Ssvmqf21 <- list(Linf=60,K=0.3,t0=-2,C=1,ts=0.9)
Sfitmqf21 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf2,
start=Ssvmqf21,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Ssvmqf22 <- list(Linf=40,K=0.1,t0=0,C=1.9,ts=0.7)
Sfitmqf22 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf2,
start=Ssvmqf22,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Ssvmqf23 <- list(Linf=40,K=0.5,t0=0,C=1.4,ts=0.7)
Sfitmqf23 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf2,
start=Ssvmqf23,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
cbind(Ssvmqf2,Ssvmqf21,Ssvmqf22,Ssvmqf23) # The starting values
round(cbind(coef(Sfitmqf2),coef(Sfitmqf21),coef(Sfitmqf22),
coef(Sfitmqf23)),4) # Parameter estimates look good (i.e., comparable)
# Some starting values did produce different estimates, but fit was worse. e.g.,
# Ssvmqf2Z <- list(Linf=60,K=0.5,t0=-2,C=1.3,ts=0.5)
# Sfitmqf2Z <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf2,start=Ssvmqf2Z,lower=Slwrbnd,upper=SuprbndM,algorithm="port",control=ctrl)
# coef(Sfitmqf2Z)
# deviance(Sfitmqf2Z)
Psvmqf21 <- list(Linf=60,Kpr=0.2,t0=0,ts=0.7,NGT=0.1)
Pfitmqf21 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf2,
start=Psvmqf21,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Psvmqf22 <- list(Linf=40,Kpr=0.4,t0=-1,ts=0.7,NGT=0.5)
Pfitmqf22 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf2,
start=Psvmqf22,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Psvmqf23 <- list(Linf=60,Kpr=0.5,t0=0,ts=0.5,NGT=0.3)
Pfitmqf23 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf2,
start=Psvmqf23,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
cbind(Psvmqf2,Psvmqf21,Psvmqf22,Psvmqf23) # Starting values
round(cbind(coef(Pfitmqf2),coef(Pfitmqf21),coef(Pfitmqf22),
coef(Pfitmqf23)),4) # Parameter estimates look good (i.e., comparable)
# Mosquitofish Site 4
## Refit the Site-4 models from alternative starting values to check that the
## original fits converged to a global minimum.
Tsvmqf41 <- list(Linf=60,K=0.3,t0=0)
# BUG FIX: this fit previously used start=Tsvmqf4 (the original starting
# values), so the alternative set Tsvmqf41 was never actually tested.
Tfitmqf41 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf4,
start=Tsvmqf41,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tsvmqf42 <- list(Linf=90,K=0.6,t0=0)
Tfitmqf42 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf4,
start=Tsvmqf42,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tsvmqf43 <- list(Linf=80,K=0.1,t0=-2)
Tfitmqf43 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf4,
start=Tsvmqf43,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
cbind(Tsvmqf4,Tsvmqf41,Tsvmqf42,Tsvmqf43) # The starting values
# BUG FIX: the fourth column previously repeated coef(Tfitmqf42); show the
# Tfitmqf43 estimates instead.
round(cbind(coef(Tfitmqf4),coef(Tfitmqf41),coef(Tfitmqf42),
coef(Tfitmqf43)),4) # Parameter estimates look good (i.e., comparable)
Ssvmqf41 <- list(Linf=60,K=0.3,t0=0,C=1,ts=0.9)
Sfitmqf41 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf4,
start=Ssvmqf41,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Ssvmqf42 <- list(Linf=60,K=0.9,t0=0,C=2,ts=0.8)
Sfitmqf42 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf4,
start=Ssvmqf42,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Ssvmqf43 <- list(Linf=40,K=0.9,t0=0,C=1,ts=0.8)
Sfitmqf43 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf4,
start=Ssvmqf43,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
cbind(Ssvmqf4,Ssvmqf41,Ssvmqf42,Ssvmqf43) # The starting values
round(cbind(coef(Sfitmqf4),coef(Sfitmqf41),coef(Sfitmqf42),
coef(Sfitmqf43)),4) # Parameter estimates look good (i.e., comparable)
# Seems very sensitive to choice of ts (in terms of convergence)
Psvmqf41 <- list(Linf=60,Kpr=0.4,t0=-2,ts=0.7,NGT=0.5)
Pfitmqf41 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf4,
start=Psvmqf41,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Psvmqf42 <- list(Linf=40,Kpr=0.3,t0=0,ts=0.9,NGT=0.7)
Pfitmqf42 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf4,
start=Psvmqf42,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Psvmqf43 <- list(Linf=50,Kpr=0.2,t0=-2,ts=0.7,NGT=0.7)
Pfitmqf43 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf4,
start=Psvmqf43,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
cbind(Psvmqf4,Psvmqf41,Psvmqf42,Psvmqf43) # Starting values
round(cbind(coef(Pfitmqf4),coef(Pfitmqf41),coef(Pfitmqf42),
coef(Pfitmqf43)),4) # Parameter estimates look good (i.e., comparable)
# Mosquitofish Site 9
## Refit the Site-9 models from alternative starting values to check that the
## original fits converged to a global minimum.
Tsvmqf91 <- list(Linf=60,K=0.3,t0=0)
# BUG FIX: this fit previously used start=Tsvmqf9 (the original starting
# values), so the alternative set Tsvmqf91 was never actually tested.
Tfitmqf91 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf9,
start=Tsvmqf91,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tsvmqf92 <- list(Linf=90,K=0.6,t0=0)
Tfitmqf92 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf9,
start=Tsvmqf92,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
Tsvmqf93 <- list(Linf=80,K=0.1,t0=-2)
Tfitmqf93 <- nls(sl~vbTyp(age2,Linf,K,t0),data=mqf9,
start=Tsvmqf93,lower=Tlwrbnd,upper=Tuprbnd,
algorithm="port",control=ctrl)
cbind(Tsvmqf9,Tsvmqf91,Tsvmqf92,Tsvmqf93) # The starting values
# BUG FIX: the fourth column previously repeated coef(Tfitmqf92); show the
# Tfitmqf93 estimates instead.
round(cbind(coef(Tfitmqf9),coef(Tfitmqf91),coef(Tfitmqf92),
coef(Tfitmqf93)),4) # Parameter estimates look good (i.e., comparable)
Ssvmqf91 <- list(Linf=60,K=0.3,t0=-1,C=0.4,ts=0.8)
Sfitmqf91 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf9,
start=Ssvmqf91,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Ssvmqf92 <- list(Linf=30,K=0.1,t0=-1,C=0.4,ts=0.9)
Sfitmqf92 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf9,
start=Ssvmqf92,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
Ssvmqf93 <- list(Linf=60,K=0.2,t0=-1,C=0.2,ts=0.6)
Sfitmqf93 <- nls(sl~vbSO(age2,Linf,K,t0,C,ts),data=mqf9,
start=Ssvmqf93,lower=Slwrbnd,upper=SuprbndM,
algorithm="port",control=ctrl)
cbind(Ssvmqf9,Ssvmqf91,Ssvmqf92,Ssvmqf93) # The starting values
round(cbind(coef(Sfitmqf9),coef(Sfitmqf91),coef(Sfitmqf92),
coef(Sfitmqf93)),4) # Parameter estimates look good (i.e., comparable)
# Difficult time finding starting values that led to convergence
Psvmqf91 <- list(Linf=40,Kpr=0.1,t0=0,ts=0.65,NGT=0.25)
Pfitmqf91 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf9,
start=Psvmqf91,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Psvmqf92 <- list(Linf=60,Kpr=0.3,t0=-1,ts=0.95,NGT=0.5)
Pfitmqf92 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf9,
start=Psvmqf92,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
Psvmqf93 <- list(Linf=40,Kpr=0.2,t0=-1,ts=0.5,NGT=0.5)
Pfitmqf93 <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf9,
start=Psvmqf93,lower=Plwrbnd,upper=Puprbnd,
algorithm="port",control=ctrl)
cbind(Psvmqf9,Psvmqf91,Psvmqf92,Psvmqf93) # Starting values
round(cbind(coef(Pfitmqf9),coef(Pfitmqf91),coef(Pfitmqf92),
coef(Pfitmqf93)),4) # Parameter estimates look good (i.e., comparable)
# Some starting values produced different estimates, but fit was worse. e.g.,
# Psvmqf9Z <- list(Linf=60,Kpr=0.3,t0=0,ts=0.95,NGT=0.5)
# Pfitmqf9Z <- nls(sl~vbPA(age2,Linf,Kpr,t0,ts,NGT),data=mqf9,start=Psvmqf9Z,lower=Plwrbnd,upper=Puprbnd,algorithm="port",control=ctrl)
# coef(Pfitmqf9Z)
# deviance(Pfitmqf9Z)
|
# cacheMatrixTests
# Interactive smoke tests for makeCacheMatrix() and cacheSolve() (defined in
# cachematrix.R). Run line-by-line and inspect the printed output; there are
# no formal assertions here.
source("cachematrix.R")
# -----------------------------------------------------------------------------
# Test: makeCacheMatrix()
# -----------------------------------------------------------------------------
## Test the function with the default empty matrix
# (use <- for assignment, per R style, rather than =)
x <- makeCacheMatrix()
# get the empty matrix
x$get()
# get the inverse (nothing cached yet)
x$get_inverse()
# set the inverse, even though wrong
x$set_inverse(matrix())
# get the inverse
x$get_inverse()
## Change the original matrix to a 1 by 1 matrix
x$set(matrix(c(1)))
# Get the inverse, still not set
x$get_inverse()
# Get the matrix
x$get()
# Calculate the inverse
my_inverse <- solve(x$get())
my_inverse
# set the inverse
x$set_inverse(my_inverse)
# get this inverse
x$get_inverse()
## Change the original matrix to a 4 x 4 matrix
new_matrix <- matrix(rnorm(16), ncol=4)
new_matrix
# Solve the matrix
new_inverse <- solve(new_matrix)
new_inverse
# set and then get the x to the new matrix
x$set(new_matrix)
x$get()
identical(x$get(), new_matrix)
# set and then get the new inverse
x$set_inverse(new_inverse)
x$get_inverse()
identical(x$get_inverse(), new_inverse)
# -----------------------------------------------------------------------------
# Test: cacheSolve()
# -----------------------------------------------------------------------------
## Test the function with the default empty matrix
x <- makeCacheMatrix()
# Get the matrix inverse, remember, NA's return more NA's
cacheSolve(x)
# get it again (repeated calls should hit the cache)
for (i in 1:10) cacheSolve(x)
## Test the function with an invertible 2 x 2 matrix
x <- makeCacheMatrix(matrix(c(1, 2, 3, 4), nrow = 2))
# Get the matrix inverse (first call computes and caches it)
cacheSolve(x)
# get it again (should be returned from the cache)
cacheSolve(x)
# make sure keeps working
for (i in 1:20) {
print(cacheSolve(x))
}
# test repeatedly with a fresh 6 x 6 matrix (set() should clear the cache)
x$set(matrix(rnorm(36), nrow = 6))
for (i in 1:20) {
print(cacheSolve(x))
}
|
/cacheMatrixTest.R
|
no_license
|
Burkehaven/ProgrammingAssignment2
|
R
| false
| false
| 1,904
|
r
|
# cacheMatrixTests
# Interactive smoke tests for makeCacheMatrix() and cacheSolve() (defined in
# cachematrix.R). Run line-by-line and inspect the printed output; there are
# no formal assertions here.
source("cachematrix.R")
# -----------------------------------------------------------------------------
# Test: makeCacheMatrix()
# -----------------------------------------------------------------------------
## Test the function with the default empty matrix
# (use <- for assignment, per R style, rather than =)
x <- makeCacheMatrix()
# get the empty matrix
x$get()
# get the inverse (nothing cached yet)
x$get_inverse()
# set the inverse, even though wrong
x$set_inverse(matrix())
# get the inverse
x$get_inverse()
## Change the original matrix to a 1 by 1 matrix
x$set(matrix(c(1)))
# Get the inverse, still not set
x$get_inverse()
# Get the matrix
x$get()
# Calculate the inverse
my_inverse <- solve(x$get())
my_inverse
# set the inverse
x$set_inverse(my_inverse)
# get this inverse
x$get_inverse()
## Change the original matrix to a 4 x 4 matrix
new_matrix <- matrix(rnorm(16), ncol=4)
new_matrix
# Solve the matrix
new_inverse <- solve(new_matrix)
new_inverse
# set and then get the x to the new matrix
x$set(new_matrix)
x$get()
identical(x$get(), new_matrix)
# set and then get the new inverse
x$set_inverse(new_inverse)
x$get_inverse()
identical(x$get_inverse(), new_inverse)
# -----------------------------------------------------------------------------
# Test: cacheSolve()
# -----------------------------------------------------------------------------
## Test the function with the default empty matrix
x <- makeCacheMatrix()
# Get the matrix inverse, remember, NA's return more NA's
cacheSolve(x)
# get it again (repeated calls should hit the cache)
for (i in 1:10) cacheSolve(x)
## Test the function with an invertible 2 x 2 matrix
x <- makeCacheMatrix(matrix(c(1, 2, 3, 4), nrow = 2))
# Get the matrix inverse (first call computes and caches it)
cacheSolve(x)
# get it again (should be returned from the cache)
cacheSolve(x)
# make sure keeps working
for (i in 1:20) {
print(cacheSolve(x))
}
# test repeatedly with a fresh 6 x 6 matrix (set() should clear the cache)
x$set(matrix(rnorm(36), nrow = 6))
for (i in 1:20) {
print(cacheSolve(x))
}
|
# So here we'll try and get all the data in the correct structure for the spatial model.
library(SEBDAM)
library(tidyverse)
library(sf)
library(stringr)
library(optimx)
library(parallel)
library(INLA)
library(ggthemes)
library(cowplot)
# Download the helper functions (fishery log import, plotting helper, and the
# function to go from an INLA mesh to sf)
funs <- c("https://raw.githubusercontent.com/Mar-Scal/Assessment_fns/master/Fishery/logs_and_fishery_data.r",
"https://raw.githubusercontent.com/Mar-Scal/Assessment_fns/master/Maps/pectinid_projector_sf.R",
"https://raw.githubusercontent.com/Mar-Scal/Assessment_fns/master/Maps/convert_inla_mesh_to_sf.R"
)
# Now run through a quick loop to load each one, just be sure that your working directory is read/write!
# Each file is downloaded into the working directory, sourced, then removed.
for(fun in funs)
{
download.file(fun,destfile = basename(fun))
source(paste0(getwd(),"/",basename(fun)))
file.remove(paste0(getwd(),"/",basename(fun)))
}
# Get the Sab area outline...
# (FIX: use TRUE rather than T, which is a reassignable variable in R)
Sab.shape <- st_read("D:/Github/GIS_layers/survey_boundaries/Sab.shp", quiet=TRUE)
# Bring in the survey data from the NAS
# load("Y:/Offshore/Assessment/Data/Survey_data/2022/Survey_summary_output/Survey_all_results.Rdata")
# surv.dat <- surv.dat$Sab
# saveRDS(surv.dat,'D:/Github/BBn_model/Results/Sab_surv.dat.RDS')
surv.dat <- readRDS('D:/Github/BBn_model/Results/Sab_surv.dat.RDS')
# Need to get condition factor out of here too
#mod.dat <- survey.obj$Sab$model.dat
#saveRDS(mod.dat,'D:/Github/BBn_model/Results/Sab_model.dat.RDS')
mod.dat <- readRDS('D:/Github/BBn_model/Results/Sab_model.dat.RDS')
#load("F:/NAS/Offshore/Assessment/Data/Model/2022/Sab/Model_input_midpoint.RData")
# Bring in the fishery data
# logs_and_fish(loc="offshore",year = 1986:2022,direct="Y:/Offshore/Assessment/", get.marfis=F)
# fish.dat<-merge(new.log.dat,old.log.dat,all=T)
# fish.dat$ID<-1:nrow(fish.dat)
# Now we can clip both of these to subset it to the data that I think we need for the analysis....
# First the fishery data
# Sab.fish <- fish.dat %>% dplyr::filter(bank == "Sab")
# # There are 12 data points at 0,0 that we remove, I'm not worried about accounting for these 12 points!
# Sab.fish <- Sab.fish %>% dplyr::filter(lat !=0 | lon != 0)
# # Now I want to put a 'survey year' on these because that's what we're gonna need for our modelling... start by porting over the year
# Sab.fish$survey.year <- Sab.fish$year
# # DK NOTE: Now this is going to get confusing for us and we may want to tweak SEBDAM for this, but that's a down the road job, not a playing around with model job
# # But based on the indexing in SEBDAM, I am going to change how we index the survey year data from what we have done with offshore traditionally.
# # Historically anything from the last half of the year goes into the following years, eg. survey.year 2002 = June 2001- May 2002.
# # But in SEBDAM we have (B(t-1) - C(t-1)), so let's say we have year 2000 survey biomass, this says we remove the 2000 catch from that
# # we want that catch to be the catch from June 2000 to May 2001, i.e. we remove the catch before we allow the population to grow
# # This is what we do in our current model, but we have a different index (C(t) on our model.
# # Basically survey year 2002 = June 2002 - May 2003 now
# #DK note: We probably should think more about the survey year fun and how exactly we want to handle removal of catch in our models.
# Sab.fish$month <- lubridate::month(Sab.fish$date)
# Sab.fish$survey.year[Sab.fish$month %in% c("January","February","March","April","May")] <- Sab.fish$survey.year[Sab.fish$month %in% c("January","February","March","April","May")] -1
# # Add a fake 2022 data point as there were no removals in 2022
# Sab.fish[nrow(Sab.fish)+1,] <- NA
# Sab.fish$pro.repwt[nrow(Sab.fish)] <- 0
# Sab.fish$year[nrow(Sab.fish)] <- 2022
# Sab.fish$survey.year[nrow(Sab.fish)] <-2022
# Sab.fish$lon[nrow(Sab.fish)] <- -61.68767
# Sab.fish$lat[nrow(Sab.fish)] <- 43.63017
#saveRDS(Sab.fish,'D:/Github/BBn_model/Results/Fishery_data/Sab_fish.dat.RDS')
Sab.fish <- readRDS('D:/Github/BBn_model/Results/Fishery_data/Sab_fish.dat.RDS')
Sab.fish$pro.repwt <- Sab.fish$pro.repwt/1000 # It looks like what I saved is already in tonnes.
# ---- Run configuration ----
repo.loc   <- "D:/Github/BBn_model/"  # local repo root used for all output
mod.select <- "TLM"                   # which model to fit ("TLM" or "SEAM")
atow <- 800 * 2.4384 / 10^6           # area of standard tow in km2
num.knots <- 4                        # number of spatial knots for SEBDAM
years <- 1995:2022                    # survey years included in the model
NY <- length(years)                   # number of modelled years
c_sys <- 32620                        # target CRS (EPSG code)
# Transform Sable to 32620
Sab.shape <- st_transform(Sab.shape, crs = c_sys) # Sab is totally in 32620 border so think they are basically equivalent options here
# Just going to use the core area to see if that helps model and the prediction function...
#Sab.tst <- st_cast(Sab.shape, "POLYGON")
#Sab.shape <-Sab.tst[1,]
# ---- Build the survey model input ----
# I  = fully recruited biomass index, IR = recruit biomass index,
# L  = clappers (dead shells), N = total live + dead count per tow.
# OK, so step 1 here is getting the model input that Raphael needs for the model
# The survey data....
live.subset <- surv.dat %>% dplyr::filter(state == 'live')
dead.subset <- surv.dat %>% dplyr::filter(state== "dead")
live.input <- data.frame(I = live.subset$com.bm, IR = live.subset$rec.bm,year = live.subset$year,tow = live.subset$tow,tot.live.com = live.subset$com,lat = live.subset$lat,lon=live.subset$lon)
# Here's an option to use different size classes to see what happens...
# Here is one where we expand the recruit sizes to be 70-90 mm
#live.input <- data.frame(I = live.subset$com.bm, IR = (live.subset$`bin_70-80_bm` +live.subset$`bin_80-90_bm`) ,year = live.subset$year,tow = live.subset$tow,tot.live.com = live.subset$com,lat = live.subset$lat,lon=live.subset$lon)
# Here is one where 50-80 is recruits and 80+ is Fully recruited
# live.input <- data.frame(I = (live.subset$`bin_90-120_bm` + live.subset$`bin_120_plus_bm` + live.subset$`bin_80-90_bm`),
#                          IR = (live.subset$`bin_50-70_bm` + live.subset$`bin_70-80_bm`),
#                          year = live.subset$year,tow = live.subset$tow,tot.live.com = live.subset$com,lat = live.subset$lat,lon=live.subset$lon)
clap.input <- data.frame(L = dead.subset$com,tow = dead.subset$tow,year = dead.subset$year)
# Join live and clapper observations on tow/year so each row is one tow.
mod.input <- left_join(live.input,clap.input,by=c('tow','year'))
mod.input$N <- round(mod.input$tot.live.com + mod.input$L)
# Looks like there are no values > 0 but < 0.5, so the low clapper numbers should all round up to 1 (which makes sense as you'd only get < 0.5 if we had tows twice as long as they should be)
mod.input$L <- round(mod.input$L)
# Convert to sf (WGS84 lat/lon), reproject, and clip to the model domain.
mod.input.sf <- st_as_sf(mod.input,coords = c('lon','lat'),remove=F,crs = 4326)
mod.input.sf <- mod.input.sf %>% st_transform(crs=c_sys)
mod.input.sf <- mod.input.sf %>% dplyr::filter(year %in% years)
mod.input.sf <- st_intersection(mod.input.sf,Sab.shape)
#mod.input.sf[nrow(mod.input.sf)+1,] <- mod.input.sf[nrow(mod.input.sf),]
#mod.input.sf$year[nrow(mod.input.sf)] <- 2015
# Now I need to get the I and IR into kg/km^2
# Year is the 1-based year index the model uses (year 1 = min(years)).
mod.input.sf$Year <- mod.input.sf$year - (min(years)-1)
mod.input.sf$I <- mod.input.sf$I/atow
mod.input.sf$IR <- mod.input.sf$IR/atow
#survey.obj$Sab$model.dat
# Adding missing survey years (2015 and 2020). The L and N columns both need
# 'data', but 0's are fine, so that'll do the trick; the biomass indices are
# set to NA so those years are treated as unobserved.
# Append a placeholder row for a survey year with no survey.
# dat       : survey input (sf data frame) to append to
# yr        : the missing survey year
# all.years : full vector of modelled years (used for the 1-based Year index)
# The last row is duplicated as a template (keeps geometry/tow/etc. populated),
# then the fields the model reads are overwritten. Returns the extended data.
add.missing.survey.year <- function(dat, yr, all.years)
{
n <- nrow(dat) + 1
dat[n,] <- dat[n-1,]
dat$year[n] <- yr
dat$Year[n] <- which(all.years == yr)
dat$I[n] <- NA
dat$IR[n] <- NA
dat$tot.live.com[n] <- NA
dat$L[n] <- 0
dat$N[n] <- 0
dat
}
mod.input.sf <- add.missing.survey.year(mod.input.sf, 2015, years)
# And repeat for 2020
mod.input.sf <- add.missing.survey.year(mod.input.sf, 2020, years)
# Now clip mod.input.sf to the right number of years...
mod.input.sf <- mod.input.sf %>% dplyr::filter(year %in% years)
# Growth!!
# ---- von Bertalanffy growth projections ----
# Projects this year's mean shell height one year ahead and converts to weight
# via condition factor (CF): weight ~ CF * (height/100)^3.
mod.growth.dat <- mod.dat
# # Grab the growth data, we have ageing data from 1980's that I'm going to use to calculate growth here.
# Data is coming from ageing data in 1989, found here.... Y:\Offshore\Assessment\Data\Ageing\archive\old_ageing_from_Amy_2022\SAB height at age 1989_2.pdf
L.inf <- 136.628
#to <- 1.337 # So this uses a 1 year offset that we no longer believe in, going to make this 0.337 to align more with what we now do...
to <- 0.337
K <- 0.2269
# This is weight in this year, which becomes t-1
waa.tm1 <- mod.growth.dat$CF*(mod.growth.dat$l.bar/100)^3 # Average(ish) weight of commerical sized scallop in the current year
# Using this years average shell height we can find the exptected shell height for the scallops in the next year
# ht = (Linf * (1-exp(-K)) + exp(-K) * height(last year))
# laa.t is the projected size of the current years scallops into next year.
laa.t <- L.inf*(1-exp(-K)) + exp(-K) * mod.growth.dat$l.bar
# The c() term in the below offsets the condition so that current year's condition slots into the previous year and repeats
# the condition for the final year), this effectively lines up "next year's condition" with "predicted shell height next year (laa.t)
# This gets us the predicted weight of the current crop of scallops next year based on next years CF * laa.t^3
# Of course we don't have next years condition thus the last condition is simply repeated
# waa.t is using the condition from next year and the growth from next year to get next years weight
waa.t <- c(mod.growth.dat$CF[-1],mod.growth.dat$CF[nrow(mod.growth.dat)])*(laa.t/100)^3
# Here we use the current condition factor to calculate the weight next year (since we use laa.t)
# That's really the only difference between waa.t and waa.t2, waa.t uses next years condition to project growth
# what waa.t2 uses the current condition to project growth. So that's really what we are comparing here with these
# two growth metrics isn't it, this is really just comparing impact of using current vs. future condition factor on our growth estimates.
waa.t2 <-mod.growth.dat$CF*(laa.t/100)^3
# Now the growth, expected and realized.
mod.growth.dat$g <- waa.t/waa.tm1
# This is using the actual condition factor and growing the scallops by laa.t
mod.growth.dat$g2 <- waa.t2/waa.tm1
# same thing here but for the recruits (l.k = mean recruit shell height)
waa.tm1 <-mod.growth.dat$CF*(mod.growth.dat$l.k/100)^3
laa.t <- L.inf*(1-exp(-K))+exp(-K)*mod.growth.dat$l.k
waa.t <- c(mod.growth.dat$CF[-1],mod.growth.dat$CF[nrow(mod.growth.dat)])*(laa.t/100)^3
waa.t2 <- mod.growth.dat$CF*(laa.t/100)^3
mod.growth.dat$gR <- waa.t/waa.tm1
mod.growth.dat$gR2 <- waa.t2/waa.tm1
# Need to replace 2019 and 2020 values because of missing 2020 survey. 2021 is NOT influenced by missing data in 2020, it's only 2019 and 2020 that need imputed
# how we have everything set up. I am going to do this the same way we have set it up for BBn and GB for consistency sake.
# Now we fill in 2014, 2015, 2019 and 2020 because of missing surveys in 2015 and 2020
mod.growth.dat[nrow(mod.growth.dat)+ 1,] <- NA
mod.growth.dat$year[nrow(mod.growth.dat)] <- 2020
mod.growth.dat[nrow(mod.growth.dat)+ 1,] <- NA
mod.growth.dat$year[nrow(mod.growth.dat)] <- 2015
mod.growth.dat <- mod.growth.dat[order(mod.growth.dat$year),]
# Fill in 2019 and 2020 with LTM growth before 2020.
# NOTE(review): g/gR are imputed for 2014-2015 AND 2019-2020 while g2/gR2 are
# only imputed for 2015 and 2020 -- confirm this asymmetry is intentional
# (g2/gR2 appear to be used only for the terminal-year projection below).
mod.growth.dat$g[which(mod.growth.dat$year %in% c(2014:2015,2019:2020))] <- median(mod.growth.dat$g[mod.growth.dat$year<2020], na.rm=T)
mod.growth.dat$g2[which(mod.growth.dat$year %in% c(2015,2020))] <- median(mod.growth.dat$g2[mod.growth.dat$year<2020], na.rm=T)
mod.growth.dat$gR[which(mod.growth.dat$year %in% c(2014:2015,2019:2020))] <- median(mod.growth.dat$gR[mod.growth.dat$year<2020], na.rm=T)
mod.growth.dat$gR2[which(mod.growth.dat$year %in% c(2015,2020))] <- median(mod.growth.dat$gR2[mod.growth.dat$year<2020], na.rm=T)
# Turn this into a vector and add a value for next year.
# The extra (NY+1)th value uses the current-condition growth (g2/gR2) from the
# final year as the forward projection.
growth <- data.frame(g = c(mod.growth.dat$g,mod.growth.dat$g2[nrow(mod.growth.dat)]),
                     gR = c(mod.growth.dat$gR,mod.growth.dat$gR2[nrow(mod.growth.dat)]),
                     year = c(mod.growth.dat$year,max(mod.growth.dat$year+1)))
growth <- growth %>% dplyr::filter(year %in% c(years,(max(years)+1)))
# # We need to make up some data for 2020 since there wasn't a survey. We could also do it this way, the result is very similar
# # First we take them mean CF between 2019 and 2021
# mod.growth.dat$CF[mod.growth.dat$year == 2020] <- median(mod.growth.dat$CF[mod.growth.dat$year %in% 2019:2021],na.rm=T)
# mod.growth.dat$l.bar[mod.growth.dat$year == 2020] <- median(mod.growth.dat$l.bar[mod.growth.dat$year %in% 2019:2021],na.rm=T)
# mod.growth.dat$l.k[mod.growth.dat$year == 2020] <- median(mod.growth.dat$l.k[mod.growth.dat$year %in% 2019:2021],na.rm=T)
#von.B <- function(L.inf,to,K) {L <- L.inf*(1-exp(-K*(age-t0)))}
# # GB
# k = 0.22
# Linf <- 149
# t0 <- 0.22
# age=3:6
#
# Linf*(1-exp(-K*(age-t0)))
#
# #BBN
# k = 0.19
# Linf <- 148
# t0 <- 0.11
# age=3:6
#
# Linf*(1-exp(-K*(age-t0)))
# Here are the Sable parameters, note that I believe the to is due to differences in ageing that was done in the
# 1980s-early 2000s which we have moved away from since 2010, this aligns us with NOAA methods
# #Sab
# k = 0.2269
# Linf <- 136
# t0 <- 1.337
# age=3:6
#
# Linf*(1-exp(-K*(age-t0)))
#
#growth <- data.frame(g = runif(NY+1,1.1,1.2),gR = runif(NY+1,1.2,1.4))
# ---- Prepare the catch data ----
# Subset the fishery data to the correct years. We need to take on next year catch too..)
Sab.fish <- Sab.fish %>% dplyr::filter(survey.year %in% years) # c(years,(max(years)+1))
#tst <- Sab.fish.sf %>% dplyr::filter(survey.year %in% 1993:2014)
#rems <- tst %>% dplyr::group_by(year) %>% dplyr::summarise(tot = sum(pro.repwt,na.rm=T))
# Convert to sf (WGS84), reproject, and clip to the Sable model domain.
Sab.fish.sf <- st_as_sf(Sab.fish,coords = c("lon","lat"),remove =F, crs = 4326)
Sab.fish.sf <- Sab.fish.sf %>% st_transform(crs= c_sys)
# Now lets clip this to be data inside of our Sab boundary.
Sab.fish.sf <- st_intersection(Sab.fish.sf,Sab.shape)
# Check removals each fishing year calculated using this data
Sab.fish.by.year <- Sab.fish.sf %>% dplyr::group_by(year) %>% dplyr::summarise(tot = sum(pro.repwt,na.rm=T))
Sab.fish.by.survey.year <- Sab.fish.sf %>% dplyr::group_by(survey.year) %>% dplyr::summarise(tot = sum(pro.repwt,na.rm=T))
# So this looks reasonable in the most recent years, but I probably need to check the early years to see if we are missing any of the removals, from above check (only 12 points removed) it
# seems like we might be fine, but need to check against our historical Removals estimates...
#tail(Sab.fish.by.year)
#tail(Sab.fish.by.survey.year)
# OK, so now let's see if we can use the catch knot thing Raph made to split this up withing the Sab domain
#We just need 3 columns for this
catch.sf <- Sab.fish.sf %>% dplyr::select(pro.repwt,survey.year)
names(catch.sf) <- c("Catch","Year","geometry")
# For the moment we need to have this starting at year 1.
catch.sf$Year <- catch.sf$Year - (min(years)-1)
# Get rid of any 0s (due to survey year fun things)
#catch.sf$Catch <- catch.sf$Catch
#catch.sf$geometry <- catch.sf$geometry/1000
# A map...
# b.map <- pecjector(area= "Sab",c_sys = c_sys,add_layer = list(land = 'grey',eez = 'eez' , nafo = 'main',sfa = 'offshore',survey = c("offshore","outline")),txt.size=8,axes = "DM")
# Sab.fish.map <- b.map + geom_sf(data = Sab.fish.sf) + facet_wrap(~year) + geom_sf(data= Sab.shape,fill = NA)
# Sab.fish.map
# Set up our mesh...
#Sab.mesh <- setup_mesh(catch.sf,model_bound = Sab.shape,nknot=8,seed=20) # Seeds 20 and 66 work
#Sab.shape$geometry <- Sab.shape$geometry/1000
#st_crs(Sab.shape) <- 32619
#mod.input.sf$geometry <- mod.input.sf$geometry/1000
#st_crs(mod.input.sf) <- 32619
Sab.mesh <- setup_mesh(mod.input.sf,model_bound = Sab.shape,nknot=num.knots, max.edge = c(8,20),cutoff=2.5,seed=34)
Sab.mesh.sf <- inla.mesh2sf(Sab.mesh$mesh)
# NOTE(review): the *1000 scaling below apparently converts the mesh output
# back from km to m before re-attaching the UTM CRS -- confirm that the
# SEBDAM mesh is in fact built in km units.
Sab.mesh.sf$triangles$geometry <- Sab.mesh.sf$triangles$geometry*1000
Sab.mesh.sf$vertices$geometry <- Sab.mesh.sf$vertices$geometry*1000
st_crs(Sab.mesh.sf$triangles) <- c_sys
st_crs(Sab.mesh.sf$vertices) <- c_sys
knots.sf <- st_as_sf(as.data.frame(Sab.mesh$knots$centers), coords = c("X","Y"))
knots.sf$geometry <- knots.sf$geometry*1000
st_crs(knots.sf) <- c_sys
# Plot the mesh
#ggplot(Sab.mesh.sf$triangles) + geom_sf() + geom_sf(data= Sab.shape,fill = NA,color = 'blue',size=2) + geom_sf(data = knots.sf,fill = NA)
# Now make the prediction grid
pred.grid<-setup_pred_grid(knots=Sab.mesh$knots,model_bound=Sab.mesh$utm_bound)
st_crs(pred.grid$grid) <- c_sys
# Plot the grid
#ggplot(pred.grid$grid) + geom_sf(aes(fill = as.factor(knotID))) + scale_fill_viridis_d()
# Get the knots on the right scale
#knots.on.right.scale <- Sab.mesh$knots
#knots.on.right.scale$centers <- knots.on.right.scale$centers*1000
#Sebdam catches
# Allocate each catch record to its knot, giving per-knot catch matrices.
catchy <- catch_spread(catch = catch.sf,knots = Sab.mesh$knots)
catchy$sum_catches <- catchy$sum_catches[,-1] # A hack until Raph gets new model code up and running.
# For now we need to toss the first column from there
#catchy$density_catches <- catchy$density_catches[,-1]
#catchy$sum_catches <- catchy$sum_catches[,-1]
# Auto-print for a quick sanity check when running interactively.
catchy
#TLM catch: one total catch value per modelled year.
# NOTE(review): .drop=F only keeps empty groups for FACTOR grouping columns;
# Year is numeric here, so a year with no catch rows would simply be absent
# from catch.tlm (cf. the fake 2022 data point added upstream) -- confirm.
catch.tlm <- catch.sf %>% group_by(Year,.drop=F) %>% dplyr::summarise(catch = sum(Catch,na.rm=T))
# ggplot(mod.input.sf) + geom_boxplot(aes(x=Year,y=I,group=Year)) + scale_y_log10()
# ggplot(mod.input.sf) + geom_boxplot(aes(x=Year,y=IR,group=Year))+ scale_y_log10()
# ggplot(mod.input.sf) + geom_boxplot(aes(x=Year,y=N,group=Year))+ scale_y_log10()
# ggplot(mod.input.sf) + geom_boxplot(aes(x=Year,y=L,group=Year))+ scale_y_log10()
# Fart around with inputs and see if that helps...
#mod.input.sf$L <- 3*mod.input.sf$L # This seemed to help when running with 10 years, but blew up with 20
#mod.input.sf$L[mod.input.sf$L > mod.input.sf$N] <- 0.5*mod.input.sf$N[mod.input.sf$L > mod.input.sf$N]
# Maybe I'm seeing too many recruits??
#mod.input.sf$IR <- mod.input.sf$IR/2.5
# What happens if I triple the number of Fully Recruited?
#mod.input.sf$I <- 3* mod.input.sf$I
# Here I try to make the IR's a fraction of the I in the following year, which may/may not be overly complicating the issue...
# for(i in 1:NY)
# {
#   if(i < NY & years[i] != 2015) mod.input.sf$IR[mod.input.sf$year == years[i]] <- runif(mod.input.sf$IR[mod.input.sf$year == (years[i])],0.01,0.1)*mean(mod.input.sf$I[mod.input.sf$year == (years[i]+1)])
#   if(i == NY) mod.input.sf$IR[mod.input.sf$year == years[i]] <- runif(mod.input.sf$IR[mod.input.sf$year == (years[i])],0.01,0.1)*mean(mod.input.sf$I[mod.input.sf$year == (years[i])])
# }
#SEBDAM version
# ---- Assemble the TMB data/parameter/map objects for the chosen model ----
# Entries in set_data$map set to factor(NA) are FIXED at their initial values
# (not estimated) -- that is how m0/R0/qR are pinned below.
if(mod.select == "SEAM")
{
set_data<-data_setup(data=mod.input.sf,growths=growth,catch=as.data.frame(catchy$sum_catches),
                     model="SEBDAM",mesh=Sab.mesh$mesh,obs_mort=T, prior=TRUE,prior_pars=c(10,12),
                     mult_qI=T,spat_approach="spde",
                     knot_obj=Sab.mesh$knots,knot_area=pred.grid$area,separate_R_aniso =T,all_se=FALSE)
# So this will fix the mean value of m0 to be whatever the intial value is set at. Let's see what happens!
set_data$par$log_m0 <- 0 # 0 = 1
set_data$par$log_R0 <- 5.9915 # 5.3 = 200, 5 = 148, 4 = 55, 5.9915 = 400, 4.606 = 100
#set_data$par$log_qR <- -1.5
#set_data$map <-list(log_m0=factor(NA))
set_data$map <-list(log_m0=factor(NA),log_R0 = factor(NA))
#set_data$map <-list(log_m0=factor(NA),log_R0 = factor(NA),log_qR = factor(NA))
}
#TLM version, Dude is this ever sensitive to the q priors! (5,12) actually looks solid in terms of results... maybe we can get so lucky with SEBDAM :-)
# Note that the Catch time series should be 1 year longer than the survey data here!!
if(mod.select == "TLM")
{
set_data<-data_setup(data=as.data.frame(mod.input.sf),growths=growth[,1:2],catch=catch.tlm$catch,
                     model="TLM",obs_mort=T,prior=T,prior_pars=c(10,12))
set_data$par$log_q_R <- -0.7 #-0.7 # Around 0.5, similar to some of the seam models.
#set_data$par$log_R0 <- 4 # 5.3 = 200, 5 = 148, 4 = 55, 6 = 400
#set_data$par$log_qR <- -1.5
#set_data$map <-list(log_m0=factor(NA),log_R0 = factor(NA),log_qR = factor(NA))
set_data$map <-list(log_q_R=factor(NA))
}
# ---- Fit the model ----
#save.image("D:/Github/BBn_model/Results/sab_input.RData")
#load("D:/Github/BBn_model/Results/sab_input.RData")
#Mcdonald, Raphael well did you look at the data after data_setup?
#like set_data$data, look at n_tows and pos_tows_I
#if you get an n_tows=0, that'll tell you what year something weird is happening, narrows your scope
#set_data$par$log_R0 <- 10 # Testing to see if I start this lower if it impacts our recruitment estimates.
# Quick structural sanity check of the assembled TMB data before fitting.
str(set_data$data)
#obj<-TMB::MakeADFun(data=set_data$data,parameters=set_data$par,random=set_data$random,map=set_data$map,DLL="SEBDAM",silent=F)
#Opt<-try(stats::nlminb(start=obj$par,obj=obj$fn,gr=obj$gr,control=control),T)
#Report <- obj$report()
#mod.fit <- readRDS("D:/Github/BBn_model/Results/Models/Sab_model_output_1993_2022_5_knots_m_0_3.Rds")
#set_data$par$log_B0 <- 6 # If you get an NA error in the model below, set log_B0 to a super high number
#set_data$par$log_R0 <- 15
#set_data$par$log_m0 <- -1
mod.fit<-fit_model(set_data,silent=F)
# Now save the results appropriately.
# File names encode the scenario (year range plus the fixed parameter values)
# so that different runs do not overwrite each other.
if(mod.select != "TLM")
{
# SEAM/SEBDAM: record the fixed m0 and R0 starting values and the knot count.
m0.par <- exp(set_data$par$log_m0)
r0.par <- signif(exp(set_data$par$log_R0),digits=2)
scenario.select <- paste0(min(years),"_",max(years),"_vary_m_m0_",m0.par,"_R0_",r0.par,"_",num.knots,"_knots")
saveRDS(mod.fit,paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,".Rds"))
saveRDS(Sab.mesh,paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,"_mesh.Rds"))
saveRDS(pred.grid,paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,"_predict_grid.Rds"))
}
if(mod.select == "TLM")
{
# TLM: record the fixed recruit catchability (qR) in a file-name-safe form.
# fixed = TRUE makes "." a literal dot: the old pattern "0." was an (unescaped)
# regex where "." matched ANY character, and it also left the "." in the file
# name for any qR value >= 1.
qR.par <- sub(".","_",signif(exp(set_data$par$log_q_R),digits=2),fixed=TRUE)
scenario.select <- paste0(min(years),"_",max(years),"_qR_",qR.par)
saveRDS(mod.fit,paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,".Rds"))
}
# unique(names(mod.fit$sdrep$value))
# params<-get_parameters(mod.fit)
# str(params)
# rownames(params)
# #write.csv(params,"D:/Github/BBn_model/Results/Model/Sab_parms.csv")
#
# pred.proc<-get_processes(mod.fit)
# str(pred.proc)
# summary(pred.proc$densities$m)
#
# plot(c(years,max(years)+1),pred.proc$totals$totB, type = 'b',xlab="",ylab="Total Biomass (tonnes)")
# plot(c(years,max(years)+1),pred.proc$totals$totR, type = 'b',xlab="",ylab = "Recruits")
# plot(c(years,max(years)+1),pred.proc$totals$mean_m, type = 'b',xlab="",ylab = "Natural mortality")
# #TLM
# plot(c(years,max(years)+1),pred.proc$process$B, type = 'b',xlab="",ylab="Total Biomass (tonnes)")
# plot(c(years,max(years)+1),pred.proc$process$R, type = 'b',xlab="",ylab = "Recruits")
# plot(c(years,max(years)+1),pred.proc$process$m, type = 'b',xlab="",ylab = "Natural mortality")
#summary(pred.proc$process$m)
# Ideas from Raph... perhaps a knot with a lot of recruit 0's could be an issue
# If we have a knot with a 0 in the first year, that could send the model off to la-la land
# Increasing the L/N ratio didn't help with S/m estimates
# Increasing the L/N ratio and decreasing the number of Recruits didn't help with S/m estimates
# decreasing the number of recruits by >50% seems to make the qR estimate much more sensible
# Tripling the biomass of fully recruited also helps, but still nothing is working with the natural mortality.
# changing the length of the time series hasn't helped with much of anything, but does give unstable results.
# Using the SPDE model hasn't seemed to help
# Increasing FR biomass artificially by a factor of 3 does nothing, combining that with a more realistic
# natural
# Using a fixed m of 0.1 starting in 1986,1991, and 1995 all results in the qR being > 1, 1997 was false convergences with these settings.
# Moving to 1999 makes qR kinda reasonable, a bit high (about 0.55) but reasonable nonetheless.
# Going to 2002 and qR gets worse again... I can't get model to go with 1998 data for some reason...
# Using 2000 and qR goes above 1, start year is very sensitive.
# I tried going from 1999 with the recruits being 70-90 mm and the FR being 90+, that didn't do anything useful, qR was > 1.8.
# I tried going from 1999 with recruits being 50-80 and FR being 80+ and..... nothing good happened.
# What does work is fixing m to be relatively high (around 0.3) and lowering the number of knots, 4 and 10 seem to work
# Other numbers end up with the 0 recruits problem.
# So here are my initial suite of models to compare.
################################################### End the initial model runs ###########################################
################################################### End the initial model runs ###########################################
################################################### End the initial model runs ###########################################
##################### Now load the model and make the figures! ##############################################
# ---- Figure-stage configuration: must match a saved scenario from above ----
atow<-800*2.4384/10^6 # area of standard tow in km2
num.knots <- 4
# NOTE(review): "RO" is the letter O, not zero. It is used consistently below,
# but differs from the R0/log_R0 naming used elsewhere -- confirm before renaming.
RO <- 55
qR <- "0_5" # This is just for TLM models
years <- 1995:2022
NY <- length(years)
c_sys <- 32620
theme_set(theme_few(base_size = 22))
repo.loc <- "D:/Github/BBn_model/"
mod.select <- "SEAM"
################################################### End the initial model runs ###########################################
### Make the figures for the models
# Rebuild the scenario string so we load the matching saved model fit.
if(mod.select != "TLM") scenario.select <- paste0(min(years),"_",max(years),"_vary_m_m0_1_R0_",RO,"_",num.knots,"_knots")
if(mod.select == "TLM") scenario.select <- paste0(min(years),"_",max(years),"_qR_",qR)
mod.fit <- readRDS(paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,".Rds"))
if(mod.select != "TLM") catchy <- mod.fit$obj$env$data$C*mod.fit$obj$env$data$area # Get this into tonnes from catch density.
if(mod.select == "TLM") catchy <- mod.fit$obj$env$data$C
# This is only needed for SEAM.
# NOTE: this if() block extends well beyond this section (it wraps all of the
# SEAM spatial figure code that follows).
if(mod.select != "TLM")
{
pred.grid <- readRDS(paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,"_predict_grid.Rds"))
# Now set up to run the figures
# matYear/knots index vectors flatten the (knot x year) report matrices into
# long format; the "1" variants omit the projection year (NY+1).
matYear<-c(rep(c(years,(max(years)+1)),each=num.knots))
matYear1<-c(rep(years,each=num.knots))
knots<-rep(1:num.knots,NY+1)
knots1<-rep(1:num.knots,NY)
grid.gis <- pred.grid$grid
# Scale the prediction grid back to metres and re-attach the UTM CRS.
grid.gis$geometry <- grid.gis$geometry*1000
st_crs(grid.gis) <- 32620
# Now simplify the grid for the spatial plots...
knot.gis <- aggregate(grid.gis, list(grid.gis$knotID), function(x) x[1])
# Get the spatial data output
B<-data.frame(B=as.vector(mod.fit$report$B),Year=matYear,knotID=knots)
B.dat.plot<-left_join(knot.gis,B,by=c("knotID"))
# Recruits
R<-data.frame(R=as.vector(mod.fit$report$R),Year=matYear1, knotID=knots1)
R.dat.plot<-left_join(knot.gis,R,by=c("knotID"))
#Natural Mortality
m<-data.frame(m=as.vector(mod.fit$report$m),Year=matYear,knotID=knots)
m.dat.plot<-left_join(knot.gis,m,by=c("knotID"))
#Spatial q's (one catchability per knot)
qI<-data.frame(qI=as.vector(mod.fit$report$qI),knotID=unique(knots))
q.dat.plot<-left_join(knot.gis,qI,by=c("knotID"))
# Exploitation
# Now lets try and make a spatial exploitation rate plot. I'm not 1000% sure this is how I want to calculate this, but I think it is... sort it out :-)
F.dat<-data.frame(B=as.vector(mod.fit$report$areaB[,-ncol(mod.fit$report$areaB)]/1000),
                  C = as.vector(as.matrix(catchy)), Year=matYear1, knotID=knots1)
F.dat <- F.dat %>% dplyr::mutate(exploit = C/(B+C)) # Sticking with how offshore does this (C/(B+C)) C/B of some variant may be more realistic
F.dat.plot<-left_join(knot.gis,F.dat,by=c("knotID"))
# To get a weighted m time series combing the B, R, and the m data above
bmr <- data.frame(B=as.vector(mod.fit$report$areaB/1000),
                  m=as.vector(mod.fit$report$m),
                  R=c(as.vector(mod.fit$report$areaR/1000),rep(NA,num.knots)),
                  Year=matYear, knotID=knots)
# Now get an m estimate for the whole area for B, R, and both, I think easiest will be to get totB and totR by year in here...
tBR <- bmr %>% group_by(Year) %>% dplyr::summarise(totB = sum(B,na.rm=T),
                                                   totR = sum(R,na.rm=T),
                                                   totSSB = sum(B,na.rm=T) + sum(R,na.rm=T))
# Now merge that with bmr...
bmr <- left_join(bmr,tBR,by="Year")
# Now getting weighted means by year should be easy... (biomass-weighted m)
nat.mat <- bmr %>% group_by(Year) %>% dplyr::summarise(m.FR = sum(B*m/totB),
                                                       m.R = sum(R*m/totR),
                                                       m.all = sum((R+B)*m/totSSB))
nat.mat$Raph.m <- mod.fit$report$mean_m
# Now make them long...
nat.mat.plt <- pivot_longer(nat.mat,!Year,names_to = "method",values_to = 'm')
nat.mat.plt$method <- factor(nat.mat.plt$method,levels = c("m.all","Raph.m","m.FR","m.R"),labels = c("Weighted","Unweighted","Fully Recruited","Recruits"))
# If not already loaded, make a map
#b.map <- pecjector(area= "BBn",c_sys = c_sys,add_layer = list(land = 'grey',eez = 'eez' , nafo = 'main',sfa = 'offshore',survey = c("offshore","outline")),txt.size=8,axes = "DM")
# Final piece is that we can compare the biomass knot by knot to what would be expected based on B/R/m/g/C alone. This is a bit messy but this does the trick...
# i.e. B.exp(t+1) = exp(-m(t+1)) * [ g(t)*(B(t)-C(t)) + gR(t)*R(t) ] per knot,
# compared against the model's own estimate B.mod(t+1).
gs <- mod.fit$obj$env$data$gI
gRs <- mod.fit$obj$env$data$gR
tmp <- NULL
for(i in 1:(NY-1))
{
# NOTE(review): Bs.tot/Rs.tot/ms are loop-invariant and could be hoisted
# above the loop; left in place to keep the code byte-identical.
Bs.tot <- mod.fit$report$B*mod.fit$obj$env$data$area/1000
Rs.tot <- mod.fit$report$R*mod.fit$obj$env$data$area/1000
ms <- mod.fit$report$m
Bst <- (exp(-ms[,i+1]))*gs[i]*(Bs.tot[,i]-catchy[,i])
Rst <- (exp(-ms[,i+1]))*gRs[i]*(Rs.tot[,i])
B2 <- Bst + Rst
# Negative expectations (catch > biomass at a knot) are flagged and pinned to 5.
if(any(B2 < 0)) {B2[B2<= 0] <- 5; print("HEADS UP!! You have a negative B2 estimate (look for the 5s in B.exp).")}
B.next <- Bs.tot[,i+1]
B.diff <- B2 - B.next
B.per.diff <- 100*((B2 - B.next)/B.next)
n.not <- length(B2)
# First year has no prediction, so it gets an all-NA placeholder row set.
if(i == 1) tmp[[as.character(years[i])]] <- data.frame(knot = 1:n.not,Year = rep(years[i],n.not),B.exp = NA,B.mod = NA,B.fr = NA,B.rec = NA,B.diff = NA,B.per.diff = NA,
                                                       m = NA,C = NA,g = NA,gR = NA)
tmp[[as.character(years[i+1])]] <- data.frame(knot = 1:n.not,Year = rep(years[i+1],n.not),B.exp = B2,B.mod = B.next,B.fr = Bst,B.rec = Rst,B.diff = B.diff,B.per.diff = B.per.diff,
                                              m = ms[,i+1],C = catchy[,i],g = rep(gs[i],n.not),gR = rep(gRs[i],n.not))
}
# Unpack that object
B.diff.comp <- do.call('rbind',tmp)
Bdiff.comp.dat<-data.frame(B.diff=as.vector(B.diff.comp$B.diff),B.per.diff = as.vector(B.diff.comp$B.per.diff),
                           B.exp=as.vector(B.diff.comp$B.exp),B.mod = as.vector(B.diff.comp$B.mod),
                           B.fr=as.vector(B.diff.comp$B.fr),B.rec = as.vector(B.diff.comp$B.rec),
                           Year=matYear1,knotID=knots1)
Bdiff.comp.dat.plot<-left_join(knot.gis,Bdiff.comp.dat,by=c("knotID"))
Bdiff.comp.dat.plot <- Bdiff.comp.dat.plot %>% dplyr::filter(Year != min(years))
# Save this so I can compare the 'miss' across models...
# NOTE(review): "differnce" is a typo in the file name, kept as-is because
# downstream comparison scripts may already read this exact path.
saveRDS(Bdiff.comp.dat.plot,paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,"_B_differnce.Rds"))
# Smaller text for spatial figures.
theme_set(theme_few(base_size = 14))
#Spatial predictions
#B: plot on the log scale but label the legend with back-transformed values.
b.brk <- pretty(log(B.dat.plot$B))
b.lab <- signif(exp(b.brk),digits=2)
spatial.B.plot<- ggplot() + geom_sf(data=B.dat.plot,aes(fill=log(B)),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = b.brk, labels=b.lab,name="Predicted Biomass \nDensity (kg\U2022km\U207B\U00B2)",option = "A",begin=0.2) +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_biomass.png"),spatial.B.plot,base_width = 10,base_height = 10)
# NOTE(review): an exact copy-paste duplicate of the plot/save block above sat
# here (same data, same output file "Sab_Spatial_biomass.png"); its "Remove
# missing survey years" comment was never acted on, so it only re-created and
# re-saved the identical figure. It has been removed; the real filtered
# (no-missing-surveys) version follows below.
# Remove missing survey years (2015/2020 were imputed, 2023 is the projection).
spatial.B.plot<- ggplot() + geom_sf(data=B.dat.plot %>% dplyr::filter(!Year %in% c(2015,2020,2023)),aes(fill=log(B)),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = b.brk, labels=b.lab,name="Predicted Biomass \nDensity (kg\U2022km\U207B\U00B2)",option = "A",begin=0.2) +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_biomass_no_missing_surveys.png"),spatial.B.plot,base_width = 10,base_height = 10)
# Subset to 4 representative years for a compact version of the figure.
spatial.B.plot<- ggplot() + geom_sf(data=B.dat.plot %>% dplyr::filter(Year %in% c(2001,2009,2014,2019)),aes(fill=log(B)),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = b.brk, labels=b.lab, name="Predicted Biomass \nDensity (kg\U2022km\U207B\U00B2)",option = "A",begin=0.2) +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_biomass_4_years.png"),spatial.B.plot,base_width = 10,base_height =7)
# Recruit biomass density maps, same layout as the biomass maps above.
# Breaks are data-driven here (pretty() on the log scale), with back-transformed
# legend labels rounded to 2 significant figures.
r.brk <- pretty(log(R.dat.plot$R))
r.lab <- signif(exp(r.brk),digits=2)
spatial.R.plot<- ggplot() + geom_sf(data=R.dat.plot,aes(fill=log(R)),col='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = r.brk, labels = r.lab,name="Predicted Recruit \nDensity (kg\U2022km\U207B\U00B2)",end=0.8)+
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_recruits.png"),spatial.R.plot,base_width = 10,base_height = 10)
# Drop the years with no survey (2015, 2020, 2023)
spatial.R.plot<- ggplot() + geom_sf(data=R.dat.plot %>% dplyr::filter(!Year %in% c(2015,2020,2023)),aes(fill=log(R)),col='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = r.brk, labels = r.lab,name="Predicted Recruit \nDensity (kg\U2022km\U207B\U00B2)",end=0.8)+
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_recruits_no_missing_surveys.png"),spatial.R.plot,base_width = 10,base_height = 10)
# Subset to the same 4 representative years used for the biomass maps
spatial.R.plot<- ggplot() + geom_sf(data=R.dat.plot %>% dplyr::filter(Year %in% c(2001,2009,2014,2019)),aes(fill=log(R)),col='grey')+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
facet_wrap(~Year)+
scale_fill_viridis_c(breaks = r.brk, labels = r.lab,name="Predicted Recruit \nDensity (kg\U2022km\U207B\U00B2)",end=0.8)+
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_recruits_4_years.png"),spatial.R.plot,base_width = 10,base_height = 7)
# Natural-mortality colour scale: fixed breaks on the log scale, with the
# back-transformed values (2 significant figures) used as legend labels.
m.vals <- c(0.03, 0.1, 0.3, 1)
m.brk <- log(m.vals)
# Data-driven alternative kept for reference:
#m.brk <- pretty(log(m.dat.plot$m))
m.lab <- signif(exp(m.brk), digits = 2)
# Natural mortality maps (instantaneous rate), using viridis "B" ("inferno").
spatial.m.plot <- ggplot() + geom_sf(data=m.dat.plot,aes(fill=log(m)),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = m.brk, labels = m.lab,name="Predicted Natural \nMortality",option = "B",direction =1,begin = 0.2,end=1) + theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_mort.png"),spatial.m.plot,base_width = 10,base_height = 10)
# Drop the years with no survey (2015, 2020, 2023)
spatial.m.plot <- ggplot() + geom_sf(data=m.dat.plot %>% dplyr::filter(!Year %in% c(2015,2020,2023)),aes(fill=log(m)),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = m.brk, labels = m.lab,name="Predicted Natural \nMortality",option = "B",direction =1,begin = 0.2,end=1) + theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_mort_no_missing_surveys.png"),spatial.m.plot,base_width = 10,base_height = 10)
# Subset to the same 4 representative years
spatial.m.plot <- ggplot() + geom_sf(data=m.dat.plot %>% dplyr::filter(Year %in% c(2001,2009,2014,2019)),aes(fill=log(m)),color='grey')+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
facet_wrap(~Year)+
scale_fill_viridis_c(breaks = m.brk, labels = m.lab,name="Predicted Natural \nMortality (Inst)",option = "B",direction =1,begin = 0.2,end=1) +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_mort_4_years.png"),spatial.m.plot,base_width = 10,base_height = 7)
# Catchability map. qI is time-invariant, so there is no year facet; cell
# borders are suppressed (col=NA) since the knot field is fine-grained.
spatial.q.plot <- ggplot() + geom_sf(data=q.dat.plot,aes(fill=qI),col=NA)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(name="Predicted catchability (qI)",option = "C",begin = 0.2,end =0.8) +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_catchability.png"),spatial.q.plot,base_width = 10,base_height = 10)
# OK, so lets try and make a map of the spatial exploitation rates, not sure if this is all correct yet.
# Mask zero-exploitation cells to NA so log(exp.na) below doesn't produce -Inf
# and the colour scale spans only cells with actual removals.
F.dat.plot$exp.na <- NA
F.dat.plot$exp.na[F.dat.plot$exploit != 0] <- F.dat.plot$exploit[F.dat.plot$exploit != 0]
# Exploitation colour scale: hand-picked proportions spanning the observed
# range, placed on the log scale; legend labels are the proportions themselves.
e.props <- c(0.00015, 0.001, 0.005, 0.02, 0.08)
e.brk <- log(e.props)
# Data-driven alternative kept for reference:
#e.brk <- pretty(log(F.dat.plot$exp.na))
e.lab <- signif(exp(e.brk), digits = 2)
# Exploitation rate maps (proportional), all years then without missing-survey years.
spatial.exploit.plot<- ggplot() + geom_sf(data=F.dat.plot,aes(fill=log(exp.na)),color='grey') +
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
facet_wrap(~Year) + scale_fill_viridis_c(breaks = e.brk,labels = e.lab,name="Exploitation (Prop)",option = "D") + theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Spatial_exploit.png"),spatial.exploit.plot,base_width = 10,base_height = 10)
# Drop the years with no survey (2015, 2020, 2023)
spatial.exploit.plot<- ggplot() + geom_sf(data=F.dat.plot %>% dplyr::filter(!Year %in% c(2015,2020,2023)),aes(fill=log(exp.na)),color='grey') +
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
facet_wrap(~Year) + scale_fill_viridis_c(breaks = e.brk,labels = e.lab,name="Exploitation (Prop)",option = "D") + theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Spatial_exploit_no_missing_surveys.png"),spatial.exploit.plot,base_width = 10,base_height = 10)
# Subset to the same 4 representative years used for the other "_4_years" maps.
spatial.exploit.plot<- ggplot() + geom_sf(data=F.dat.plot %>% dplyr::filter(Year %in% c(2001,2009,2014,2019)),aes(fill=log(exp.na)),color='grey') +
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
facet_wrap(~Year) +
scale_fill_viridis_c(breaks = e.brk,labels = e.lab,name="Exploitation (Prop)",option = "D") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
# base_height changed 10 -> 7 for consistency: every other 4-year (2x2 facet)
# figure in this script is saved at base_width = 10, base_height = 7.
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Spatial_exploit_4_years.png"),spatial.exploit.plot,base_width = 10,base_height = 7)
# Maps of (expected - modelled) biomass by cell; diverging RdBu palette so
# over/under-prediction read as opposite colours. Breaks are on the natural
# scale (no log transform here), so labels are the breaks themselves.
bd.brk <- pretty(Bdiff.comp.dat.plot$B.diff)
bd.lab <- bd.brk #signif(exp(b.brk),digits=2)
spatial.Bdiff.plot<- ggplot() + geom_sf(data=Bdiff.comp.dat.plot,aes(fill=B.diff),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_distiller(type = 'div',breaks = bd.brk, labels=bd.lab, name="Expected - Modeled \nBiomass (tonnes)",palette = "RdBu") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_BDiff.png"),spatial.Bdiff.plot,base_width = 10,base_height = 10)
# Drop the years with no survey (2015, 2020, 2023)
spatial.Bdiff.plot<- ggplot() + geom_sf(data=Bdiff.comp.dat.plot %>% dplyr::filter(!Year %in% c(2015,2020,2023)),aes(fill=B.diff),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_distiller(type = 'div',breaks = bd.brk, labels=bd.lab, name="Expected - Modeled \nBiomass (tonnes)",palette = "RdBu") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_BDiff_no_missing_surveys.png"),spatial.Bdiff.plot,base_width = 10,base_height = 10)
# Subset to the same 4 representative years
spatial.Bdiff.plot<- ggplot() + geom_sf(data=Bdiff.comp.dat.plot %>% dplyr::filter(Year %in% c(2001,2009,2014,2019)),aes(fill=B.diff),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_distiller(type = 'div',breaks = bd.brk, labels=bd.lab, name="Expected - Modeled \nBiomass (tonnes)",palette = "RdBu") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_BDiff_4_years.png"),spatial.Bdiff.plot,base_width = 10,base_height = 7)
# Same plot but the percentage miss by cell.
pb.brk <- pretty(Bdiff.comp.dat.plot$B.per.diff)
pb.lab <- pb.brk #signif(exp(b.brk),digits=2)
spatial.per.Bdiff.plot<- ggplot() + geom_sf(data=Bdiff.comp.dat.plot,aes(fill=B.per.diff),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_distiller(type = 'div',breaks = pb.brk, labels=pb.lab, name="Expected - Modeled \nBiomass (%)",palette = "RdBu") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_per_BDiff.png"),spatial.per.Bdiff.plot,base_width = 10,base_height = 10)
# Drop the years with no survey (2015, 2020, 2023)
spatial.per.Bdiff.plot<- ggplot() + geom_sf(data=Bdiff.comp.dat.plot %>% dplyr::filter(!Year %in% c(2015,2020,2023)),aes(fill=B.per.diff),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_distiller(type = 'div',breaks = pb.brk, labels=pb.lab, name="Expected - Modeled \nBiomass (%)",palette = "RdBu") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_per_BDiff_no_missing_surveys.png"),spatial.per.Bdiff.plot,base_width = 10,base_height = 10)
# Subset to the same 4 representative years
spatial.per.Bdiff.plot<- ggplot() + geom_sf(data=Bdiff.comp.dat.plot %>% dplyr::filter(Year %in% c(2001,2009,2014,2019)),aes(fill=B.per.diff),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_distiller(type = 'div',breaks = pb.brk, labels=pb.lab, name="Expected - Modeled \nBiomass (%)",palette = "RdBu") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_per_BDiff_4_years.png"),spatial.per.Bdiff.plot,base_width = 10,base_height = 7)
# Switch to a larger base theme for the time-series comparison figures.
theme_set(theme_few(base_size = 22))
# Natural mortality comparison across estimation methods (one line per method).
m.comp <- ggplot(nat.mat.plt,aes(x=Year,y=m,group = method,color=method)) + geom_line(linewidth=1.5) +
xlab("") + ylab("Natural Mortality (instantaneous)")+
scale_color_manual(values = c("blue","orange","grey","black")) +
theme(legend.title=element_blank())
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/natural_mortality_comparisons.png"),m.comp,base_height = 6,base_width = 10)
# Same figure with the missing-survey years excluded: the series is drawn in
# three separate segments (pre-2015, 2016-2019, 2021-2022) so the lines break
# across the 2015 and 2020 gaps rather than interpolating through them.
m.comp <- ggplot(nat.mat.plt %>% dplyr::filter(Year < 2015)) + geom_line(aes(x=Year,y=m,group = method,color=method),linewidth=1.5) +
geom_line(data = nat.mat.plt %>% dplyr::filter(Year %in% 2016:2019), aes(x=Year,y=m,group = method,color=method),linewidth=1.5) +
geom_line(data = nat.mat.plt %>% dplyr::filter(Year %in% 2021:2022), aes(x=Year,y=m,group = method,color=method),linewidth=1.5) +
xlab("") + ylab("Natural Mortality (instantaneous)")+
scale_color_manual(values = c("blue","orange","grey","black")) +
theme(legend.title=element_blank())
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/natural_mortality_comparisons_no_missing_surveys.png"),m.comp,base_height = 6,base_width = 10)
}
# Extract the process estimates (B, R, m) from the fitted model and attach
# 95% CIs (exp(log-estimate +/- 1.96*SE)); results land in pred.proc$log_processes.
pred.proc <- get_processes(mod.fit)
# TLM (non-spatial) case: log_processes already holds the totals.
if(mod.select == "TLM")
{
# One extra row beyond the data years: the one-year-ahead projection.
pred.proc$log_processes$year <- c(years,max(years)+1)
pred.proc$log_processes$totB.LCI <- exp(pred.proc$log_processes$log_B - 1.96*pred.proc$log_processes$se_log_B)
pred.proc$log_processes$totB.UCI <- exp(pred.proc$log_processes$log_B + 1.96*pred.proc$log_processes$se_log_B)
pred.proc$log_processes$totR.LCI <- exp(pred.proc$log_processes$log_R - 1.96*pred.proc$log_processes$se_log_R)
pred.proc$log_processes$totR.UCI <- exp(pred.proc$log_processes$log_R + 1.96*pred.proc$log_processes$se_log_R)
pred.proc$log_processes$m.LCI <- exp(pred.proc$log_processes$log_m - 1.96*pred.proc$log_processes$se_log_m)
pred.proc$log_processes$m.UCI <- exp(pred.proc$log_processes$log_m + 1.96*pred.proc$log_processes$se_log_m)
pred.proc$log_processes <- as.data.frame(pred.proc$log_processes)
}
# SEBDAM (spatial) case: totals live in log_tot_frame; copy them into
# log_processes so the plotting code below works for either model.
if(mod.select != "TLM")
{
# Get the overall estimates + the 95% CI
pred.proc$log_processes$year <- c(years,max(years)+1)
pred.proc$log_processes$log_B <- pred.proc$log_tot_frame$log_totB
pred.proc$log_processes$log_R <- pred.proc$log_tot_frame$log_totR
pred.proc$log_processes$log_m <- pred.proc$log_tot_frame$log_mean_m
pred.proc$log_processes$totB.LCI <- exp(pred.proc$log_tot_frame$log_totB - 1.96*pred.proc$log_tot_frame$se_log_totB)
pred.proc$log_processes$totB.UCI <- exp(pred.proc$log_tot_frame$log_totB + 1.96*pred.proc$log_tot_frame$se_log_totB)
pred.proc$log_processes$totR.LCI <- exp(pred.proc$log_tot_frame$log_totR - 1.96*pred.proc$log_tot_frame$se_log_totR)
pred.proc$log_processes$totR.UCI <- exp(pred.proc$log_tot_frame$log_totR + 1.96*pred.proc$log_tot_frame$se_log_totR)
pred.proc$log_processes$m.LCI <- exp(pred.proc$log_tot_frame$log_mean_m - 1.96*pred.proc$log_tot_frame$se_log_mean_m)
pred.proc$log_processes$m.UCI <- exp(pred.proc$log_tot_frame$log_mean_m + 1.96*pred.proc$log_tot_frame$se_log_mean_m)
pred.proc$log_processes <- as.data.frame(pred.proc$log_processes)
}
# SEBDAM Version
# Annual exploitation
# catchy is a matrix (knot x year) for SEBDAM, a plain vector for TLM.
if(mod.select != "TLM") catch.annual <- data.frame(totC = colSums(catchy), Year = years)
if(mod.select == "TLM") catch.annual <- data.frame(totC = catchy, Year = years)
# Not 1000% sure this is correct at this point. It is noteworthy how different this is than the map though...
# Exploitation = C / (B + C); the projection row of log_B is dropped so B lines up with the catch years.
ann.exploit <- data.frame(year = years,B = exp(pred.proc$log_processes$log_B[-nrow(pred.proc$log_processes)]), Catch = catch.annual$totC)
ann.exploit$exploit <- ann.exploit$Catch/(ann.exploit$B+ann.exploit$Catch)
# Drop the projection year from the plotted series.
pred.proc$log_processes <- pred.proc$log_processes %>% dplyr::filter(year < 2023)
# Biomass time series
bm.ts.plot <- ggplot(pred.proc$log_processes) + geom_line(aes(year,exp(log_B)),color='firebrick2',linewidth=1.5) +
geom_ribbon(aes(ymin=totB.LCI,ymax=totB.UCI,x=year),alpha=0.5,fill='blue',color='blue') + ylim(c(0,1.6e4)) +
xlab("") + ylab("Fully Recruited Biomass (tonnes)") + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Biomass_time_series.png"),bm.ts.plot,base_width = 11,base_height = 8.5)
# Recruit time series
rec.ts.plot <- ggplot(pred.proc$log_processes) + geom_line(aes(year,exp(log_R)),color='firebrick2',linewidth=1.5) +
geom_ribbon(aes(ymin=totR.LCI,ymax=totR.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
xlab("") + ylab("Recruit Biomass (tonnes)") + ylim(c(0,3.5e3)) + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Recruit_time_series.png"),rec.ts.plot,base_width = 11,base_height = 8.5)
# Natural mortality time series...
mort.ts.plot <- ggplot(pred.proc$log_processes) + geom_line(aes(year,exp(log_m)),color='firebrick2',linewidth=1.5) +
geom_ribbon(aes(ymin=m.LCI,ymax=m.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
xlab("") + ylab("Natural mortality (Instantaneous)") + ylim(c(0,2.5)) + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_nat_mort_time_series.png"),mort.ts.plot,base_width = 11,base_height = 8.5)
# Exploitation Rate Time Series
exploit.plot <- ggplot(ann.exploit) + geom_line(aes(x=year,y=exploit),linewidth = 1.5) +
xlab("") + ylab("Exploitation Rate (Proportional)") + ylim(c(0,0.2)) + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_exploit_time_series.png"),exploit.plot,base_width = 11,base_height = 8.5)
# Same plots but removing missing survey years: each series is drawn as three
# segments (pre-2015, 2016-2019, 2021-2022) so the lines/ribbons break across
# the 2015 and 2020 survey gaps instead of interpolating through them.
bm.ts.plot <- ggplot(pred.proc$log_processes %>% dplyr::filter(year < c(2015))) +
geom_line(aes(year,exp(log_B)),color='firebrick2',linewidth=1.5) + geom_ribbon(aes(ymin=totB.LCI,ymax=totB.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
geom_line(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2016:2019), aes(year,exp(log_B)),color='firebrick2',linewidth=1.5) +
geom_ribbon(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2016:2019), aes(ymin=totB.LCI,ymax=totB.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
geom_line(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2021:2022), aes(year,exp(log_B)),color='firebrick2',linewidth=1.5) +
geom_ribbon(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2021:2022), aes(ymin=totB.LCI,ymax=totB.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
ylim(c(0,1.6e4)) + xlab("") + ylab("Fully Recruited Biomass (tonnes)") + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Biomass_time_series_no_missing_surveys.png"),bm.ts.plot,base_width = 11,base_height = 8.5)
# Recruit time series
rec.ts.plot <- ggplot(pred.proc$log_processes %>% dplyr::filter(year < c(2015))) +
geom_line(aes(year,exp(log_R)),color='firebrick2',linewidth=1.5) + geom_ribbon(aes(ymin=totR.LCI,ymax=totR.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
geom_line(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2016:2019), aes(year,exp(log_R)),color='firebrick2',linewidth=1.5) +
geom_ribbon(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2016:2019), aes(ymin=totR.LCI,ymax=totR.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
geom_line(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2021:2022), aes(year,exp(log_R)),color='firebrick2',linewidth=1.5) +
geom_ribbon(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2021:2022), aes(ymin=totR.LCI,ymax=totR.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
xlab("") + ylab("Recruit Biomass (tonnes)") + ylim(c(0,3.5e3)) + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Recruit_time_series_no_missing_surveys.png"),rec.ts.plot,base_width = 11,base_height = 8.5)
# Natural mortality time series...
# NOTE(review): ylim here is c(0,1.5) vs c(0,2.5) in the all-years version -- confirm intentional.
mort.ts.plot <- ggplot(pred.proc$log_processes %>% dplyr::filter(year < c(2015))) +
geom_line(aes(year,exp(log_m)),color='firebrick2',linewidth=1.5) + geom_ribbon(aes(ymin=m.LCI,ymax=m.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
geom_line(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2016:2019), aes(year,exp(log_m)),color='firebrick2',linewidth=1.5) +
geom_ribbon(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2016:2019), aes(ymin=m.LCI,ymax=m.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
geom_line(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2021:2022), aes(year,exp(log_m)),color='firebrick2',linewidth=1.5) +
geom_ribbon(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2021:2022), aes(ymin=m.LCI,ymax=m.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
xlab("") + ylab("Natural mortality (Instantaneous)") + ylim(c(0,1.5)) + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_nat_mort_time_series_no_missing_surveys.png"),mort.ts.plot,base_width = 11,base_height = 8.5)
# Exploitation Rate Time Series
exploit.plot <- ggplot(ann.exploit%>% dplyr::filter(year < c(2015))) + geom_line(aes(x=year,y=exploit),linewidth = 1.5) +
geom_line(data = ann.exploit%>% dplyr::filter(year %in% 2016:2019), aes(x=year,y=exploit),linewidth = 1.5) +
geom_line(data= ann.exploit%>% dplyr::filter(year %in% 2021:2022),aes(x=year,y=exploit),linewidth = 1.5) +
xlab("") + ylab("Exploitation Rate (Proportional)") + ylim(c(0,0.2)) + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_exploit_time_series_no_missing_surveys.png"),exploit.plot,base_width = 11,base_height = 8.5)
|
/Model/Sab_SEBDAM.R
|
no_license
|
freyakeyser/Framework
|
R
| false
| false
| 56,988
|
r
|
# So here we'll try and get all the data in the correct structure for the spatial model.
library(SEBDAM)
library(tidyverse)
library(sf)
library(stringr)
library(optimx)
library(parallel)
library(INLA)
library(ggthemes)
library(cowplot)
# Download helper functions from the Mar-Scal GitHub repo and source them;
# the downloaded file is deleted after sourcing so no local copy lingers.
funs <- c("https://raw.githubusercontent.com/Mar-Scal/Assessment_fns/master/Fishery/logs_and_fishery_data.r",
"https://raw.githubusercontent.com/Mar-Scal/Assessment_fns/master/Maps/pectinid_projector_sf.R",
"https://raw.githubusercontent.com/Mar-Scal/Assessment_fns/master/Maps/convert_inla_mesh_to_sf.R"
)
# Now run through a quick loop to load each one, just be sure that your working directory is read/write!
for(fun in funs)
{
download.file(fun,destfile = basename(fun))
source(paste0(getwd(),"/",basename(fun)))
file.remove(paste0(getwd(),"/",basename(fun)))
}
# Get the Sab area outline...
Sab.shape <- st_read("D:/Github/GIS_layers/survey_boundaries/Sab.shp", quiet=T)
# Survey data were pulled once from the NAS (commented lines below) and cached
# locally as RDS; the script now reads the cached copies.
# load("Y:/Offshore/Assessment/Data/Survey_data/2022/Survey_summary_output/Survey_all_results.Rdata")
# surv.dat <- surv.dat$Sab
# saveRDS(surv.dat,'D:/Github/BBn_model/Results/Sab_surv.dat.RDS')
surv.dat <- readRDS('D:/Github/BBn_model/Results/Sab_surv.dat.RDS')
# Need to get condition factor out of here too
#mod.dat <- survey.obj$Sab$model.dat
#saveRDS(mod.dat,'D:/Github/BBn_model/Results/Sab_model.dat.RDS')
mod.dat <- readRDS('D:/Github/BBn_model/Results/Sab_model.dat.RDS')
#load("F:/NAS/Offshore/Assessment/Data/Model/2022/Sab/Model_input_midpoint.RData")
# Bring in the fishery data
# logs_and_fish(loc="offshore",year = 1986:2022,direct="Y:/Offshore/Assessment/", get.marfis=F)
# fish.dat<-merge(new.log.dat,old.log.dat,all=T)
# fish.dat$ID<-1:nrow(fish.dat)
# Now we can clip both of these to subset it to the data that I think we need for the analysis....
# First the fishery data
# Sab.fish <- fish.dat %>% dplyr::filter(bank == "Sab")
# # There are 12 data points at 0,0 that we remove, I'm not worried about accounting for these 12 points!
# Sab.fish <- Sab.fish %>% dplyr::filter(lat !=0 | lon != 0)
# # Now I want to put a 'survey year' on these because that's what we're gonna need for our modelling... start by porting over the year
# Sab.fish$survey.year <- Sab.fish$year
# # DK NOTE: Now this is going to get confusing for us and we may want to tweak SEBDAM for this, but that's a down the road job, not a playing around with model job
# # But based on the indexing in SEBDAM, I am going to change how we index the survey year data from what we have done with offshore traditionally.
# # Historically anything from the last half of the year goes into the following years, eg. survey.year 2002 = June 2001- May 2002.
# # But in SEBDAM we have (B(t-1) - C(t-1)), so let's say we have year 2000 survey biomass, this says we remove the 2000 catch from that
# # we want that catch to be the catch from June 2000 to May 2001, i.e. we remove the catch before we allow the population to grow
# # This is what we do in our current model, but we have a different index (C(t) on our model.
# # Basically survey year 2002 = June 2002 - May 2003 now
# #DK note: We probably should think more about the survey year fun and how exactly we want to handle removal of catch in our models.
# Sab.fish$month <- lubridate::month(Sab.fish$date)
# Sab.fish$survey.year[Sab.fish$month %in% c("January","February","March","April","May")] <- Sab.fish$survey.year[Sab.fish$month %in% c("January","February","March","April","May")] -1
# # Add a fake 2022 data point as there were no removals in 2022
# Sab.fish[nrow(Sab.fish)+1,] <- NA
# Sab.fish$pro.repwt[nrow(Sab.fish)] <- 0
# Sab.fish$year[nrow(Sab.fish)] <- 2022
# Sab.fish$survey.year[nrow(Sab.fish)] <-2022
# Sab.fish$lon[nrow(Sab.fish)] <- -61.68767
# Sab.fish$lat[nrow(Sab.fish)] <- 43.63017
#saveRDS(Sab.fish,'D:/Github/BBn_model/Results/Fishery_data/Sab_fish.dat.RDS')
# Cached fishery data (built by the commented block above).
Sab.fish <- readRDS('D:/Github/BBn_model/Results/Fishery_data/Sab_fish.dat.RDS')
# NOTE(review): dividing by 1000 implies the saved pro.repwt is in kg and this
# converts to tonnes; the original note claimed it was already tonnes -- confirm units.
Sab.fish$pro.repwt <- Sab.fish$pro.repwt/1000 # It looks like what I saved is already in tonnes.
# Run configuration ----
repo.loc <- "D:/Github/BBn_model/"  # local repo root where figures/results are written
mod.select <- "TLM"                 # which model flavour to run/plot
# Area of a standard tow in km^2: 800 m tow length x 2.4384 m gear width / 10^6
atow <- (800 * 2.4384) / 10^6
num.knots <- 4                      # number of knots for the spatial field
years <- 1995:2022                  # modelled survey years
NY <- length(years)                 # number of modelled years
c_sys <- 32620                      # coordinate system (EPSG code, UTM zone 20N)
# Transform Sable to 32620
Sab.shape <- Sab.shape %>% st_transform(crs = c_sys) # Sab is totally in 32620 border so think they are basically equivalent options here
# Just going to use the core area to see if that helps model and the prediction function...
#Sab.tst <- st_cast(Sab.shape, "POLYGON")
#Sab.shape <-Sab.tst[1,]
# OK, so step 1 here is getting the model input that Raphael needs for the model
# The survey data....
live.subset <- surv.dat %>% dplyr::filter(state == 'live')
dead.subset <- surv.dat %>% dplyr::filter(state== "dead")
# Biomass indices: I = fully recruited, IR = recruits (per-tow values for now).
live.input <- data.frame(I = live.subset$com.bm, IR = live.subset$rec.bm,year = live.subset$year,tow = live.subset$tow,tot.live.com = live.subset$com,lat = live.subset$lat,lon=live.subset$lon)
# Here's an option to use different size classes to see what happens...
# Here is one where we expand the recruit sizes to be 70-90 mm
#live.input <- data.frame(I = live.subset$com.bm, IR = (live.subset$`bin_70-80_bm` +live.subset$`bin_80-90_bm`) ,year = live.subset$year,tow = live.subset$tow,tot.live.com = live.subset$com,lat = live.subset$lat,lon=live.subset$lon)
# Here is one where 50-80 is recruits and 80+ is Fully recruited
# live.input <- data.frame(I = (live.subset$`bin_90-120_bm` + live.subset$`bin_120_plus_bm` + live.subset$`bin_80-90_bm`),
#                          IR = (live.subset$`bin_50-70_bm` + live.subset$`bin_70-80_bm`),
#                          year = live.subset$year,tow = live.subset$tow,tot.live.com = live.subset$com,lat = live.subset$lat,lon=live.subset$lon)
# Clapper (dead shell) counts, joined to the live data by tow and year.
clap.input <- data.frame(L = dead.subset$com,tow = dead.subset$tow,year = dead.subset$year)
mod.input <- left_join(live.input,clap.input,by=c('tow','year'))
# N = total (live + clapper) count, rounded to integers for the binomial likelihood.
mod.input$N <- round(mod.input$tot.live.com + mod.input$L)
# Looks like there are no values > 0 but < 0.5, so the low clapper numbers should all round up to 1 (which makes sense as you'd only get < 0.5 if we had tows twice as long as they should be)
mod.input$L <- round(mod.input$L)
# Convert to sf (WGS84 input coords), project to the working CRS, then clip to
# the modelled years and the Sab boundary.
mod.input.sf <- st_as_sf(mod.input,coords = c('lon','lat'),remove=F,crs = 4326)
mod.input.sf <- mod.input.sf %>% st_transform(crs=c_sys)
mod.input.sf <- mod.input.sf %>% dplyr::filter(year %in% years)
mod.input.sf <- st_intersection(mod.input.sf,Sab.shape)
#mod.input.sf[nrow(mod.input.sf)+1,] <- mod.input.sf[nrow(mod.input.sf),]
#mod.input.sf$year[nrow(mod.input.sf)] <- 2015
# Now I need to get the I and IR into kg/km^2
# Year = 1-based year index used by the model; atow converts per-tow to per-km^2.
mod.input.sf$Year <- mod.input.sf$year - (min(years)-1)
mod.input.sf$I <- mod.input.sf$I/atow
mod.input.sf$IR <- mod.input.sf$IR/atow
#survey.obj$Sab$model.dat
# Add a placeholder row for each missing survey year (no survey in 2015 or 2020).
# The clapper inputs (L, N) both need 'data', but 0's are fine there; the
# biomass indices (I, IR) are set NA so the model treats them as unobserved.
# (Refactored from two copy-pasted blocks into one loop -- behavior unchanged.)
for(missing.yr in c(2015, 2020))
{
# Duplicate the last row to inherit the sf structure/geometry, then overwrite its values.
mod.input.sf[nrow(mod.input.sf)+1,] <- mod.input.sf[nrow(mod.input.sf),]
mod.input.sf$year[nrow(mod.input.sf)] <- missing.yr
mod.input.sf$Year[nrow(mod.input.sf)] <- which(years == missing.yr)
mod.input.sf$I[nrow(mod.input.sf)] <- NA
mod.input.sf$IR[nrow(mod.input.sf)] <- NA
mod.input.sf$tot.live.com[nrow(mod.input.sf)] <- NA
mod.input.sf$L[nrow(mod.input.sf)] <- 0
mod.input.sf$N[nrow(mod.input.sf)] <- 0
}
# Now clip mod.input.sf to the right number of years...
mod.input.sf <- mod.input.sf %>% dplyr::filter(year %in% years)
# Growth!!
# von Bertalanffy growth projection: predict next year's weight-at-age from
# this year's mean shell height and condition factor (CF).
mod.growth.dat <- mod.dat
# # Grab the growth data, we have ageing data from 1980's that I'm going to use to calculate growth here.
# Data is coming from ageing data in 1989, found here.... Y:\Offshore\Assessment\Data\Ageing\archive\old_ageing_from_Amy_2022\SAB height at age 1989_2.pdf
L.inf <- 136.628
#to <- 1.337 # So this uses a 1 year offset that we no longer believe in, going to make this 0.337 to align more with what we now do...
to <- 0.337
K <- 0.2269
# This is weight in this year, which becomes t-1
waa.tm1 <- mod.growth.dat$CF*(mod.growth.dat$l.bar/100)^3 # Average(ish) weight of commercial sized scallop in the current year
# Using this years average shell height we can find the expected shell height for the scallops in the next year
# ht = (Linf * (1-exp(-K)) + exp(-K) * height(last year))
# laa.t is the projected size of the current years scallops into next year.
laa.t <- L.inf*(1-exp(-K)) + exp(-K) * mod.growth.dat$l.bar
# The c() term in the below offsets the condition so that current year's condition slots into the previous year and repeats
# the condition for the final year), this effectively lines up "next year's condition" with "predicted shell height next year (laa.t)
# This gets us the predicted weight of the current crop of scallops next year based on next years CF * laa.t^3
# Of course we don't have next years condition thus the last condition is simply repeated
# waa.t is using the condition from next year and the growth from next year to get next years weight
waa.t <- c(mod.growth.dat$CF[-1],mod.growth.dat$CF[nrow(mod.growth.dat)])*(laa.t/100)^3
# Here we use the current condition factor to calculate the weight next year (since we use laa.t)
# That's really the only difference between waa.t and waa.t2, waa.t uses next years condition to project growth
# what waa.t2 uses the current condition to project growth. So that's really what we are comparing here with these
# two growth metrics isn't it, this is really just comparing impact of using current vs. future condition factor on our growth estimates.
waa.t2 <-mod.growth.dat$CF*(laa.t/100)^3
# Now the growth, expected and realized.
mod.growth.dat$g <- waa.t/waa.tm1
# This is using the actual condition factor and growing the scallops by laa.t
mod.growth.dat$g2 <- waa.t2/waa.tm1
# same thing here but for the recruits (l.k = mean recruit shell height)
waa.tm1 <-mod.growth.dat$CF*(mod.growth.dat$l.k/100)^3
laa.t <- L.inf*(1-exp(-K))+exp(-K)*mod.growth.dat$l.k
waa.t <- c(mod.growth.dat$CF[-1],mod.growth.dat$CF[nrow(mod.growth.dat)])*(laa.t/100)^3
waa.t2 <- mod.growth.dat$CF*(laa.t/100)^3
mod.growth.dat$gR <- waa.t/waa.tm1
mod.growth.dat$gR2 <- waa.t2/waa.tm1 # (stray historical note: setwd("C:/Assessment/2014/r"))
# Need to replace 2019 and 2020 values because of missing 2020 survey. 2021 is NOT influenced by missing data in 2020, it's only 2019 and 2020 that need imputed
# how we have everything set up. I am going to do this the same way we have set it up for BBn and GB for consistency sake.
# Now we fill in 2014, 2015, 2019 and 2020 because of missing surveys in 2015 and 2020
mod.growth.dat[nrow(mod.growth.dat)+ 1,] <- NA
mod.growth.dat$year[nrow(mod.growth.dat)] <- 2020
mod.growth.dat[nrow(mod.growth.dat)+ 1,] <- NA
mod.growth.dat$year[nrow(mod.growth.dat)] <- 2015
mod.growth.dat <- mod.growth.dat[order(mod.growth.dat$year),]
# Impute with the pre-2020 long-term median growth.
# NOTE(review): g/gR are filled for 2014-2015 and 2019-2020 but g2/gR2 only for
# 2015 and 2020 -- confirm the asymmetry is intentional (g2/gR2 are only used
# for the final projection year below).
mod.growth.dat$g[which(mod.growth.dat$year %in% c(2014:2015,2019:2020))] <- median(mod.growth.dat$g[mod.growth.dat$year<2020], na.rm=T)
mod.growth.dat$g2[which(mod.growth.dat$year %in% c(2015,2020))] <- median(mod.growth.dat$g2[mod.growth.dat$year<2020], na.rm=T)
mod.growth.dat$gR[which(mod.growth.dat$year %in% c(2014:2015,2019:2020))] <- median(mod.growth.dat$gR[mod.growth.dat$year<2020], na.rm=T)
mod.growth.dat$gR2[which(mod.growth.dat$year %in% c(2015,2020))] <- median(mod.growth.dat$gR2[mod.growth.dat$year<2020], na.rm=T)
# Turn this into a vector and add a value for next year (the projection year
# uses the current-condition variants g2/gR2 from the final data year).
growth <- data.frame(g = c(mod.growth.dat$g,mod.growth.dat$g2[nrow(mod.growth.dat)]),
gR = c(mod.growth.dat$gR,mod.growth.dat$gR2[nrow(mod.growth.dat)]),
year = c(mod.growth.dat$year,max(mod.growth.dat$year+1)))
growth <- growth %>% dplyr::filter(year %in% c(years,(max(years)+1)))
# # We need to make up some data for 2020 since there wasn't a survey. We could also do it this way, result is very similar
# # First we take them mean CF between 2019 and 2021
# mod.growth.dat$CF[mod.growth.dat$year == 2020] <- median(mod.growth.dat$CF[mod.growth.dat$year %in% 2019:2021],na.rm=T)
# mod.growth.dat$l.bar[mod.growth.dat$year == 2020] <- median(mod.growth.dat$l.bar[mod.growth.dat$year %in% 2019:2021],na.rm=T)
# mod.growth.dat$l.k[mod.growth.dat$year == 2020] <- median(mod.growth.dat$l.k[mod.growth.dat$year %in% 2019:2021],na.rm=T)
#von.B <- function(L.inf, t0, K) {L <- L.inf*(1-exp(-K*(age - t0)))} # NB: 'age' (and lower-case k below) would also need to be arguments
# # GB
# k = 0.22
# Linf <- 149
# t0 <- 0.22
# age=3:6
#
# Linf*(1-exp(-K*(age-t0)))
#
# #BBN
# k = 0.19
# Linf <- 148
# t0 <- 0.11
# age=3:6
#
# Linf*(1-exp(-K*(age-t0)))
# Here are the Sable parameters, note that I believe the to is due to differences in ageing that was done in the
# 1980s-early 2000s which we have moved away from since 2010, this aligns us with NOAA methods
# #Sab
# k = 0.2269
# Linf <- 136
# t0 <- 1.337
# age=3:6
#
# Linf*(1-exp(-K*(age-t0)))
#
#growth <- data.frame(g = runif(NY+1,1.1,1.2),gR = runif(NY+1,1.2,1.4))
# Subset the fishery data to the model years. (Next year's catch would be
# c(years,(max(years)+1)) -- currently commented out, see note at the TLM setup.)
Sab.fish <- Sab.fish %>% dplyr::filter(survey.year %in% years) # c(years,(max(years)+1))
#tst <- Sab.fish.sf %>% dplyr::filter(survey.year %in% 1993:2014)
#rems <- tst %>% dplyr::group_by(year) %>% dplyr::summarise(tot = sum(pro.repwt,na.rm=T))
# Convert fishery points to sf (WGS84 lon/lat), then project to the working CRS.
Sab.fish.sf <- st_as_sf(Sab.fish,coords = c("lon","lat"),remove =F, crs = 4326)
Sab.fish.sf <- Sab.fish.sf %>% st_transform(crs= c_sys)
# Now lets clip this to be data inside of our Sab boundary.
Sab.fish.sf <- st_intersection(Sab.fish.sf,Sab.shape)
# Sanity check: removals by calendar year and by survey year from the clipped data.
Sab.fish.by.year <- Sab.fish.sf %>% dplyr::group_by(year) %>% dplyr::summarise(tot = sum(pro.repwt,na.rm=T))
Sab.fish.by.survey.year <- Sab.fish.sf %>% dplyr::group_by(survey.year) %>% dplyr::summarise(tot = sum(pro.repwt,na.rm=T))
# So this looks reasonable in the most recent years, but I probably need to check the early years to see if we are missing any of the removals, from above check (only 12 points removed) it
# seems like we might be fine, but need to check against our historical Removals estimates...
#tail(Sab.fish.by.year)
#tail(Sab.fish.by.survey.year)
# OK, so now let's see if we can use the catch knot thing Raph made to split this up within the Sab domain.
# We just need 3 columns for this: catch weight, survey year, and the geometry.
catch.sf <- Sab.fish.sf %>% dplyr::select(pro.repwt,survey.year)
names(catch.sf) <- c("Catch","Year","geometry")
# For the moment we need to have this starting at year 1 (model uses a 1-based year index).
catch.sf$Year <- catch.sf$Year - (min(years)-1)
# Get rid of any 0s (due to survey year fun things)
#catch.sf$Catch <- catch.sf$Catch
#catch.sf$geometry <- catch.sf$geometry/1000
# A map...
# b.map <- pecjector(area= "Sab",c_sys = c_sys,add_layer = list(land = 'grey',eez = 'eez' , nafo = 'main',sfa = 'offshore',survey = c("offshore","outline")),txt.size=8,axes = "DM")
# Sab.fish.map <- b.map + geom_sf(data = Sab.fish.sf) + facet_wrap(~year) + geom_sf(data= Sab.shape,fill = NA)
# Sab.fish.map
# Set up our mesh...
#Sab.mesh <- setup_mesh(catch.sf,model_bound = Sab.shape,nknot=8,seed=20) # Seeds 20 and 66 work
#Sab.shape$geometry <- Sab.shape$geometry/1000
#st_crs(Sab.shape) <- 32619
#mod.input.sf$geometry <- mod.input.sf$geometry/1000
#st_crs(mod.input.sf) <- 32619
# Build the INLA mesh and knots from the model input points, bounded by the Sab shape.
Sab.mesh <- setup_mesh(mod.input.sf,model_bound = Sab.shape,nknot=num.knots, max.edge = c(8,20),cutoff=2.5,seed=34)
Sab.mesh.sf <- inla.mesh2sf(Sab.mesh$mesh)
# Mesh coordinates come back scaled down by 1000; rescale and re-attach the CRS
# so the mesh lines up with the other spatial layers. Presumably km -> m for the
# UTM CRS -- TODO confirm against setup_mesh.
Sab.mesh.sf$triangles$geometry <- Sab.mesh.sf$triangles$geometry*1000
Sab.mesh.sf$vertices$geometry <- Sab.mesh.sf$vertices$geometry*1000
st_crs(Sab.mesh.sf$triangles) <- c_sys
st_crs(Sab.mesh.sf$vertices) <- c_sys
# Knot centres, rescaled the same way as the mesh.
knots.sf <- st_as_sf(as.data.frame(Sab.mesh$knots$centers), coords = c("X","Y"))
knots.sf$geometry <- knots.sf$geometry*1000
st_crs(knots.sf) <- c_sys
# Plot the mesh
#ggplot(Sab.mesh.sf$triangles) + geom_sf() + geom_sf(data= Sab.shape,fill = NA,color = 'blue',size=2) + geom_sf(data = knots.sf,fill = NA)
# Now make the prediction grid (one polygon per knot; used for the spatial figures).
pred.grid<-setup_pred_grid(knots=Sab.mesh$knots,model_bound=Sab.mesh$utm_bound)
st_crs(pred.grid$grid) <- c_sys
# Plot the grid
#ggplot(pred.grid$grid) + geom_sf(aes(fill = as.factor(knotID))) + scale_fill_viridis_d()
# Get the knots on the right scale
#knots.on.right.scale <- Sab.mesh$knots
#knots.on.right.scale$centers <- knots.on.right.scale$centers*1000
# SEBDAM catches: spread the point catch data across the knots.
catchy <- catch_spread(catch = catch.sf,knots = Sab.mesh$knots)
catchy$sum_catches <- catchy$sum_catches[,-1] # A hack until Raph gets new model code up and running.
# For now we need to toss the first column from there
#catchy$density_catches <- catchy$density_catches[,-1]
#catchy$sum_catches <- catchy$sum_catches[,-1]
catchy
# TLM catch: one total per model year.
# NOTE(review): .drop=F only keeps empty groups if Year is a factor -- verify
# that years with zero catch actually appear in catch.tlm.
catch.tlm <- catch.sf %>% group_by(Year,.drop=F) %>% dplyr::summarise(catch = sum(Catch,na.rm=T))
# Exploratory plots of the survey inputs (kept for the record).
# ggplot(mod.input.sf) + geom_boxplot(aes(x=Year,y=I,group=Year)) + scale_y_log10()
# ggplot(mod.input.sf) + geom_boxplot(aes(x=Year,y=IR,group=Year))+ scale_y_log10()
# ggplot(mod.input.sf) + geom_boxplot(aes(x=Year,y=N,group=Year))+ scale_y_log10()
# ggplot(mod.input.sf) + geom_boxplot(aes(x=Year,y=L,group=Year))+ scale_y_log10()
# Input-tweaking experiments (all disabled) used while diagnosing model behaviour...
#mod.input.sf$L <- 3*mod.input.sf$L # This seemed to help when running with 10 years, but blew up with 20
#mod.input.sf$L[mod.input.sf$L > mod.input.sf$N] <- 0.5*mod.input.sf$N[mod.input.sf$L > mod.input.sf$N]
# Maybe I'm seeing too many recruits??
#mod.input.sf$IR <- mod.input.sf$IR/2.5
# What happens if I triple the number of Fully Recruited?
#mod.input.sf$I <- 3* mod.input.sf$I
# Here I try to make the IR's a fraction of the I in the following year, which may/may not be overly complicating the issue...
# for(i in 1:NY)
# {
#   if(i < NY & years[i] != 2015) mod.input.sf$IR[mod.input.sf$year == years[i]] <- runif(mod.input.sf$IR[mod.input.sf$year == (years[i])],0.01,0.1)*mean(mod.input.sf$I[mod.input.sf$year == (years[i]+1)])
#   if(i == NY) mod.input.sf$IR[mod.input.sf$year == years[i]] <- runif(mod.input.sf$IR[mod.input.sf$year == (years[i])],0.01,0.1)*mean(mod.input.sf$I[mod.input.sf$year == (years[i])])
# }
# SEBDAM (spatial) version of the model input setup.
if(mod.select == "SEAM")
{
set_data<-data_setup(data=mod.input.sf,growths=growth,catch=as.data.frame(catchy$sum_catches),
model="SEBDAM",mesh=Sab.mesh$mesh,obs_mort=T, prior=TRUE,prior_pars=c(10,12),
mult_qI=T,spat_approach="spde",
knot_obj=Sab.mesh$knots,knot_area=pred.grid$area,separate_R_aniso =T,all_se=FALSE)
# So this will fix the mean value of m0 to be whatever the initial value is set at. Let's see what happens!
set_data$par$log_m0 <- 0 # 0 = 1
set_data$par$log_R0 <- 5.9915 # 5.3 = 200, 5 = 148, 4 = 55, 5.9915 = 400, 4.606 = 100
#set_data$par$log_qR <- -1.5
#set_data$map <-list(log_m0=factor(NA))
# Mapping a parameter to factor(NA) tells TMB not to estimate it, so m0 and R0
# stay fixed at the initial values set just above.
set_data$map <-list(log_m0=factor(NA),log_R0 = factor(NA))
#set_data$map <-list(log_m0=factor(NA),log_R0 = factor(NA),log_qR = factor(NA))
}
# TLM (non-spatial) version. Dude is this ever sensitive to the q priors! (5,12) actually looks solid in terms of results... maybe we can get so lucky with SEBDAM :-)
# Note that the Catch time series should be 1 year longer than the survey data here!!
# NOTE(review): Sab.fish was filtered to `years` only (the +1 filter is commented
# out above), so confirm catch.tlm$catch actually has that extra year.
if(mod.select == "TLM")
{
set_data<-data_setup(data=as.data.frame(mod.input.sf),growths=growth[,1:2],catch=catch.tlm$catch,
model="TLM",obs_mort=T,prior=T,prior_pars=c(10,12))
# Fix recruit catchability at exp(-0.7) ~ 0.5 rather than estimating it (mapped to NA below).
set_data$par$log_q_R <- -0.7 #-0.7 # Around 0.5, similar to some of the seam models.
#set_data$par$log_R0 <- 4 # 5.3 = 200, 5 = 148, 4 = 55, 6 = 400
#set_data$par$log_qR <- -1.5
#set_data$map <-list(log_m0=factor(NA),log_R0 = factor(NA),log_qR = factor(NA))
set_data$map <-list(log_q_R=factor(NA))
}
#save.image("D:/Github/BBn_model/Results/sab_input.RData")
#load("D:/Github/BBn_model/Results/sab_input.RData")
# Debugging tip (from Raph): after data_setup, inspect set_data$data -- in
# particular n_tows and pos_tows_I. An n_tows of 0 flags the year where
# something weird is happening and narrows your scope.
#set_data$par$log_R0 <- 10 # Testing to see if I start this lower if it impacts our recruitment estimates.
# Quick eyeball of the data going into the model.
str(set_data$data)
#obj<-TMB::MakeADFun(data=set_data$data,parameters=set_data$par,random=set_data$random,map=set_data$map,DLL="SEBDAM",silent=F)
#Opt<-try(stats::nlminb(start=obj$par,obj=obj$fn,gr=obj$gr,control=control),T)
#Report <- obj$report()
#mod.fit <- readRDS("D:/Github/BBn_model/Results/Models/Sab_model_output_1993_2022_5_knots_m_0_3.Rds")
#set_data$par$log_B0 <- 6 # If you get an NA error in the model below, set log_B0 to a super high number
#set_data$par$log_R0 <- 15
#set_data$par$log_m0 <- -1
# Fit the model (SEBDAM or TLM, depending on how set_data was built above).
mod.fit<-fit_model(set_data,silent=F)
# Save the fitted model under a file name (scenario.select) that encodes the
# scenario settings, so different runs don't overwrite each other.
if(mod.select != "TLM")
{
# Spatial (SEBDAM/SEAM) runs also need the mesh and prediction grid saved so
# the figure section below can rebuild the spatial layers.
m0.par <- exp(set_data$par$log_m0)
r0.par <- signif(exp(set_data$par$log_R0),digits=2)
scenario.select <- paste0(min(years),"_",max(years),"_vary_m_m0_",m0.par,"_R0_",r0.par,"_",num.knots,"_knots")
saveRDS(mod.fit,paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,".Rds"))
saveRDS(Sab.mesh,paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,"_mesh.Rds"))
saveRDS(pred.grid,paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,"_predict_grid.Rds"))
}
if(mod.select == "TLM")
{
# Make qR file-name friendly by replacing the leading "0." with "0_".
# fixed = TRUE makes the "." a literal dot: the previous pattern "0." was a
# regex where "." matched ANY character, so e.g. "05" would also have matched.
qR.par <- sub("0.","0_",signif(exp(set_data$par$log_q_R),digits=2),fixed=TRUE)
scenario.select <- paste0(min(years),"_",max(years),"_qR_",qR.par)
saveRDS(mod.fit,paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,".Rds"))
}
# unique(names(mod.fit$sdrep$value))
# params<-get_parameters(mod.fit)
# str(params)
# rownames(params)
# #write.csv(params,"D:/Github/BBn_model/Results/Model/Sab_parms.csv")
#
# pred.proc<-get_processes(mod.fit)
# str(pred.proc)
# summary(pred.proc$densities$m)
#
# plot(c(years,max(years)+1),pred.proc$totals$totB, type = 'b',xlab="",ylab="Total Biomass (tonnes)")
# plot(c(years,max(years)+1),pred.proc$totals$totR, type = 'b',xlab="",ylab = "Recruits")
# plot(c(years,max(years)+1),pred.proc$totals$mean_m, type = 'b',xlab="",ylab = "Natural mortality")
# #TLM
# plot(c(years,max(years)+1),pred.proc$process$B, type = 'b',xlab="",ylab="Total Biomass (tonnes)")
# plot(c(years,max(years)+1),pred.proc$process$R, type = 'b',xlab="",ylab = "Recruits")
# plot(c(years,max(years)+1),pred.proc$process$m, type = 'b',xlab="",ylab = "Natural mortality")
#summary(pred.proc$process$m)
# Ideas from Raph... perhaps a knot with a lot of recruit 0's could be an issue
# If we have a knot with a 0 in the first year, that could send the model off to la-la land
# Increasing the L/N ratio didn't help with S/m estimates
# Incresing the L/N ratio and decreasing the number of Recruits didn't help with S/m estimates
# decreasing the number of recruits by >50% seems to make the qR estimate much more sensisble
# Tripling the biomass of fully recruited also helps, but still nothing is working with the natural mortality.
# changing the length of the time series hasn't helped with much of anything, but does give unstable results.
# Using the SPDE model hasn't seemed to help
# Increasing FR biomass artificially by a factor of 3 does nothing, combining that with a more realistic
# natural
# Using a fixed m of 0.1 starting in 1986,1991, and 1995 all results in the qR being > 1, 1997 was false convergences with these settings.
# Moving to 1999 make qR kinda reasonable, bit high (about 0.55) but reasonable nonetheless.
# Going to 2002 and qR gets worse again... I can't get model to go with 1998 data for some reason...
# Using 2000 and qR goes above 1, start year is very sensistive.
# I tried going from 1999 with the recruits being 70-90 mm and the FR being 90+, that didn't do anything useful, qR was > 1.8.
# I tried going from 1999 with recruits being 50-80 and FR being 80+ and..... nothing good happened.
# What does work is fixing m to be relatively high (around 0.3) and lowering the number of knots, 4 and 10 seem to work
# Other numbers end up with the 0 recruits problem.
# So here are my initial suite of models to compare.
################################################### End the initial model runs ###########################################
################################################### End the initial model runs ###########################################
################################################### End the initial model runs ###########################################
##################### Now load the model and make the figures! ##############################################
# Stand-alone figure section: these settings are re-declared so this section can
# be run on its own, and they must match a scenario that was saved above.
atow<-800*2.4384/10^6 # area of standard tow in km2
num.knots <- 4
# NB: "RO" is spelled with the letter O (not zero); it's only used to build
# scenario.select below.
RO <- 55
qR <- "0_5" # This is just for TLM models
years <- 1995:2022
NY <- length(years)
c_sys <- 32620
theme_set(theme_few(base_size = 22))
repo.loc <- "D:/Github/BBn_model/"
mod.select <- "SEAM"
################################################### End the initial model runs ###########################################
### Make the figures for the models
# Rebuild the scenario file-name tag and reload the saved model fit.
if(mod.select != "TLM") scenario.select <- paste0(min(years),"_",max(years),"_vary_m_m0_1_R0_",RO,"_",num.knots,"_knots")
if(mod.select == "TLM") scenario.select <- paste0(min(years),"_",max(years),"_qR_",qR)
mod.fit <- readRDS(paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,".Rds"))
# Catch by knot: the spatial model stores catch as a density, so multiply by
# knot area; TLM catch is already a total.
if(mod.select != "TLM") catchy <- mod.fit$obj$env$data$C*mod.fit$obj$env$data$area # Get this into tonnes from catch density.
if(mod.select == "TLM") catchy <- mod.fit$obj$env$data$C
# This is only needed for SEAM (everything to the end of the chunk is inside this if).
if(mod.select != "TLM")
{
pred.grid <- readRDS(paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,"_predict_grid.Rds"))
# Now set up to run the figures.
# Year/knot index vectors matching the column-major layout of the NY(+1) x knot
# matrices in mod.fit$report (matYear/knots include the projection year).
matYear<-c(rep(c(years,(max(years)+1)),each=num.knots))
matYear1<-c(rep(years,each=num.knots))
knots<-rep(1:num.knots,NY+1)
knots1<-rep(1:num.knots,NY)
# Prediction grid back on the full-scale UTM coordinates (stored /1000).
grid.gis <- pred.grid$grid
grid.gis$geometry <- grid.gis$geometry*1000
st_crs(grid.gis) <- 32620
# Now simplify the grid for the spatial plots: one (multi)polygon per knot.
knot.gis <- aggregate(grid.gis, list(grid.gis$knotID), function(x) x[1])
# Get the spatial data output: biomass density by knot and year.
B<-data.frame(B=as.vector(mod.fit$report$B),Year=matYear,knotID=knots)
B.dat.plot<-left_join(knot.gis,B,by=c("knotID"))
# Recruits (no projection year, hence matYear1/knots1).
R<-data.frame(R=as.vector(mod.fit$report$R),Year=matYear1, knotID=knots1)
R.dat.plot<-left_join(knot.gis,R,by=c("knotID"))
#Natural Mortality
m<-data.frame(m=as.vector(mod.fit$report$m),Year=matYear,knotID=knots)
m.dat.plot<-left_join(knot.gis,m,by=c("knotID"))
#Spatial q's (one catchability per knot, constant over years)
qI<-data.frame(qI=as.vector(mod.fit$report$qI),knotID=unique(knots))
q.dat.plot<-left_join(knot.gis,qI,by=c("knotID"))
# Exploitation
# Now lets try and make a spatial exploitation rate plot. I'm not 1000% sure this is how I want to calculate this, but I think it is... sort it out :-)
F.dat<-data.frame(B=as.vector(mod.fit$report$areaB[,-ncol(mod.fit$report$areaB)]/1000),
C = as.vector(as.matrix(catchy)), Year=matYear1, knotID=knots1)
F.dat <- F.dat %>% dplyr::mutate(exploit = C/(B+C)) # Sticking with how offshore does this (C/(B+C)) C/B of some variant may be more realistic
F.dat.plot<-left_join(knot.gis,F.dat,by=c("knotID"))
# To get a weighted m time series, combine the B, R, and m data above.
bmr <- data.frame(B=as.vector(mod.fit$report$areaB/1000),
m=as.vector(mod.fit$report$m),
R=c(as.vector(mod.fit$report$areaR/1000),rep(NA,num.knots)),
Year=matYear, knotID=knots)
# Now get an m estimate for the whole area for B, R, and both; easiest is to get totB and totR by year in here...
tBR <- bmr %>% group_by(Year) %>% dplyr::summarise(totB = sum(B,na.rm=T),
totR = sum(R,na.rm=T),
totSSB = sum(B,na.rm=T) + sum(R,na.rm=T))
# Now merge that with bmr...
bmr <- left_join(bmr,tBR,by="Year")
# Biomass-weighted annual mortality: weight each knot's m by its share of
# fully-recruited (m.FR), recruit (m.R), or combined (m.all) biomass.
nat.mat <- bmr %>% group_by(Year) %>% dplyr::summarise(m.FR = sum(B*m/totB),
m.R = sum(R*m/totR),
m.all = sum((R+B)*m/totSSB))
nat.mat$Raph.m <- mod.fit$report$mean_m
# Long format for plotting, with readable method labels.
nat.mat.plt <- pivot_longer(nat.mat,!Year,names_to = "method",values_to = 'm')
nat.mat.plt$method <- factor(nat.mat.plt$method,levels = c("m.all","Raph.m","m.FR","m.R"),labels = c("Weighted","Unweighted","Fully Recruited","Recruits"))
# If not already loaded, make a map
#b.map <- pecjector(area= "BBn",c_sys = c_sys,add_layer = list(land = 'grey',eez = 'eez' , nafo = 'main',sfa = 'offshore',survey = c("offshore","outline")),txt.size=8,axes = "DM")
# Final piece: compare the modelled biomass in each knot to what the process
# equation alone would predict from last year's B, R, m, growth and catch.
# Large differences flag knot/years where the model leans on process error.
gs <- mod.fit$obj$env$data$gI
gRs <- mod.fit$obj$env$data$gR
# Loop invariants hoisted out of the year loop (they were recomputed every
# iteration): area-scaled biomass/recruit totals (tonnes) and mortality matrix.
Bs.tot <- mod.fit$report$B*mod.fit$obj$env$data$area/1000
Rs.tot <- mod.fit$report$R*mod.fit$obj$env$data$area/1000
ms <- mod.fit$report$m
tmp <- NULL
for(i in seq_len(NY-1))
{
# Expected biomass next year: survivors (exp(-m)) of this year's fully
# recruited biomass (net of catch) plus surviving recruits, each scaled by growth.
Bst <- (exp(-ms[,i+1]))*gs[i]*(Bs.tot[,i]-catchy[,i])
Rst <- (exp(-ms[,i+1]))*gRs[i]*(Rs.tot[,i])
B2 <- Bst + Rst
# Use <= 0 so the check matches the replacement below: the old `< 0` check
# could leave an un-flagged exact 0, which breaks the log-scale plots later.
if(any(B2 <= 0)) {B2[B2<= 0] <- 5; print("HEADS UP!! You have a negative B2 estimate (look for the 5s in B.exp).")}
B.next <- Bs.tot[,i+1]
B.diff <- B2 - B.next
B.per.diff <- 100*((B2 - B.next)/B.next)
n.not <- length(B2)
# The first year has no prediction, so pad it with NAs to keep years aligned.
if(i == 1) tmp[[as.character(years[i])]] <- data.frame(knot = 1:n.not,Year = rep(years[i],n.not),B.exp = NA,B.mod = NA,B.fr = NA,B.rec = NA,B.diff = NA,B.per.diff = NA,
m = NA,C = NA,g = NA,gR = NA)
tmp[[as.character(years[i+1])]] <- data.frame(knot = 1:n.not,Year = rep(years[i+1],n.not),B.exp = B2,B.mod = B.next,B.fr = Bst,B.rec = Rst,B.diff = B.diff,B.per.diff = B.per.diff,
m = ms[,i+1],C = catchy[,i],g = rep(gs[i],n.not),gR = rep(gRs[i],n.not))
}
# Unpack the per-year list into one data.frame and attach the knot geometry.
B.diff.comp <- do.call('rbind',tmp)
Bdiff.comp.dat<-data.frame(B.diff=as.vector(B.diff.comp$B.diff),B.per.diff = as.vector(B.diff.comp$B.per.diff),
B.exp=as.vector(B.diff.comp$B.exp),B.mod = as.vector(B.diff.comp$B.mod),
B.fr=as.vector(B.diff.comp$B.fr),B.rec = as.vector(B.diff.comp$B.rec),
Year=matYear1,knotID=knots1)
Bdiff.comp.dat.plot<-left_join(knot.gis,Bdiff.comp.dat,by=c("knotID"))
Bdiff.comp.dat.plot <- Bdiff.comp.dat.plot %>% dplyr::filter(Year != min(years))
# Save this so the 'miss' can be compared across models. (File name keeps the
# historical "differnce" spelling so existing readers of the .Rds still work.)
saveRDS(Bdiff.comp.dat.plot,paste0(repo.loc,"Results/Models/Sab/Sab_",mod.select,"_model_output_",scenario.select,"_B_differnce.Rds"))
# Smaller text for spatial figures.
theme_set(theme_few(base_size = 14))
# Spatial predictions of fully-recruited biomass. Fill breaks are chosen on the
# log scale but labelled with back-transformed (kg/km^2) values.
#B
b.brk <- pretty(log(B.dat.plot$B))
b.lab <- signif(exp(b.brk),digits=2)
# All years. (The original script built and saved this identical figure twice
# in a row to the same file name; the redundant duplicate has been removed.)
spatial.B.plot<- ggplot() + geom_sf(data=B.dat.plot,aes(fill=log(B)),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = b.brk, labels=b.lab,name="Predicted Biomass \nDensity (kg\U2022km\U207B\U00B2)",option = "A",begin=0.2) +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_biomass.png"),spatial.B.plot,base_width = 10,base_height = 10)
# Remove missing survey years (2015 and 2020 had no survey; 2023 not yet surveyed).
spatial.B.plot<- ggplot() + geom_sf(data=B.dat.plot %>% dplyr::filter(!Year %in% c(2015,2020,2023)),aes(fill=log(B)),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = b.brk, labels=b.lab,name="Predicted Biomass \nDensity (kg\U2022km\U207B\U00B2)",option = "A",begin=0.2) +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_biomass_no_missing_surveys.png"),spatial.B.plot,base_width = 10,base_height = 10)
# Subset to 4 representative years.
spatial.B.plot<- ggplot() + geom_sf(data=B.dat.plot %>% dplyr::filter(Year %in% c(2001,2009,2014,2019)),aes(fill=log(B)),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = b.brk, labels=b.lab, name="Predicted Biomass \nDensity (kg\U2022km\U207B\U00B2)",option = "A",begin=0.2) +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_biomass_4_years.png"),spatial.B.plot,base_width = 10,base_height =7)
# Recruit density plots: same three versions as biomass (all years, missing
# survey years removed, and a 4-year subset).
r.brk <- pretty(log(R.dat.plot$R))
r.lab <- signif(exp(r.brk),digits=2)
spatial.R.plot<- ggplot() + geom_sf(data=R.dat.plot,aes(fill=log(R)),col='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = r.brk, labels = r.lab,name="Predicted Recruit \nDensity (kg\U2022km\U207B\U00B2)",end=0.8)+
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_recruits.png"),spatial.R.plot,base_width = 10,base_height = 10)
# Remove missing survey years
spatial.R.plot<- ggplot() + geom_sf(data=R.dat.plot %>% dplyr::filter(!Year %in% c(2015,2020,2023)),aes(fill=log(R)),col='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = r.brk, labels = r.lab,name="Predicted Recruit \nDensity (kg\U2022km\U207B\U00B2)",end=0.8)+
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_recruits_no_missing_surveys.png"),spatial.R.plot,base_width = 10,base_height = 10)
# 4 years
spatial.R.plot<- ggplot() + geom_sf(data=R.dat.plot %>% dplyr::filter(Year %in% c(2001,2009,2014,2019)),aes(fill=log(R)),col='grey')+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
facet_wrap(~Year)+
scale_fill_viridis_c(breaks = r.brk, labels = r.lab,name="Predicted Recruit \nDensity (kg\U2022km\U207B\U00B2)",end=0.8)+
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_recruits_4_years.png"),spatial.R.plot,base_width = 10,base_height = 7)
# Natural mortality plots: fixed log-scale breaks (0.03-1) rather than pretty()
# so the colour scale is comparable across model runs.
m.brk <- log(c(0.03,0.1,0.3,1))
#m.brk <- pretty(log(m.dat.plot$m))
m.lab <- signif(exp(m.brk),digits=2)
spatial.m.plot <- ggplot() + geom_sf(data=m.dat.plot,aes(fill=log(m)),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = m.brk, labels = m.lab,name="Predicted Natural \nMortality",option = "B",direction =1,begin = 0.2,end=1) + theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_mort.png"),spatial.m.plot,base_width = 10,base_height = 10)
# Remove missing survey years
spatial.m.plot <- ggplot() + geom_sf(data=m.dat.plot %>% dplyr::filter(!Year %in% c(2015,2020,2023)),aes(fill=log(m)),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(breaks = m.brk, labels = m.lab,name="Predicted Natural \nMortality",option = "B",direction =1,begin = 0.2,end=1) + theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_mort_no_missing_surveys.png"),spatial.m.plot,base_width = 10,base_height = 10)
# 4 years
spatial.m.plot <- ggplot() + geom_sf(data=m.dat.plot %>% dplyr::filter(Year %in% c(2001,2009,2014,2019)),aes(fill=log(m)),color='grey')+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
facet_wrap(~Year)+
scale_fill_viridis_c(breaks = m.brk, labels = m.lab,name="Predicted Natural \nMortality (Inst)",option = "B",direction =1,begin = 0.2,end=1) +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_mort_4_years.png"),spatial.m.plot,base_width = 10,base_height = 7)
# Catchability (qI) is one value per knot (constant over time), so no year facet.
spatial.q.plot <- ggplot() + geom_sf(data=q.dat.plot,aes(fill=qI),col=NA)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_viridis_c(name="Predicted catchability (qI)",option = "C",begin = 0.2,end =0.8) +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_catchability.png"),spatial.q.plot,base_width = 10,base_height = 10)
# OK, so lets try an make a map of the spatial exploitation rates, not sure if this is all correct yet.
# Zero-exploitation cells are set to NA so they render as empty instead of distorting the log colour scale.
F.dat.plot$exp.na <- NA
F.dat.plot$exp.na[F.dat.plot$exploit != 0] <- F.dat.plot$exploit[F.dat.plot$exploit != 0]
# Hand-picked breaks on the log scale; legend labels are back-transformed to proportions.
e.brk <- log(c(0.00015,0.001,0.005,0.02,0.08))
#e.brk <- pretty(log(F.dat.plot$exp.na))
e.lab <- signif(exp(e.brk),digits=2)
spatial.exploit.plot<- ggplot() + geom_sf(data=F.dat.plot,aes(fill=log(exp.na)),color='grey') +
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
facet_wrap(~Year) + scale_fill_viridis_c(breaks = e.brk,labels = e.lab,name="Exploitation (Prop)",option = "D") + theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Spatial_exploit.png"),spatial.exploit.plot,base_width = 10,base_height = 10)
# Remove missing survey years
# NOTE(review): 2015, 2020, and 2023 are treated throughout as missing-survey years -- confirm against the survey record.
spatial.exploit.plot<- ggplot() + geom_sf(data=F.dat.plot %>% dplyr::filter(!Year %in% c(2015,2020,2023)),aes(fill=log(exp.na)),color='grey') +
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
facet_wrap(~Year) + scale_fill_viridis_c(breaks = e.brk,labels = e.lab,name="Exploitation (Prop)",option = "D") + theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Spatial_exploit_no_missing_surveys.png"),spatial.exploit.plot,base_width = 10,base_height = 10)
# Same map restricted to four representative years.
# 4 years
spatial.exploit.plot<- ggplot() + geom_sf(data=F.dat.plot %>% dplyr::filter(Year %in% c(2001,2009,2014,2019)),aes(fill=log(exp.na)),color='grey') +
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
facet_wrap(~Year) +
scale_fill_viridis_c(breaks = e.brk,labels = e.lab,name="Exploitation (Prop)",option = "D") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Spatial_exploit_4_years.png"),spatial.exploit.plot,base_width = 10,base_height = 10)
# Maps of the difference between expected and modeled biomass per cell, on the
# absolute (tonnes) scale first and then as a percentage, each drawn three ways:
# all years, missing-survey years removed, and four representative years.
bd.brk <- pretty(Bdiff.comp.dat.plot$B.diff)
bd.lab <- bd.brk#signif(exp(b.brk),digits=2)
spatial.Bdiff.plot<- ggplot() + geom_sf(data=Bdiff.comp.dat.plot,aes(fill=B.diff),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_distiller(type = 'div',breaks = bd.brk, labels=bd.lab, name="Expected - Modeled \nBiomass (tonnes)",palette = "RdBu") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_BDiff.png"),spatial.Bdiff.plot,base_width = 10,base_height = 10)
# Remove missing survey years
spatial.Bdiff.plot<- ggplot() + geom_sf(data=Bdiff.comp.dat.plot %>% dplyr::filter(!Year %in% c(2015,2020,2023)),aes(fill=B.diff),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_distiller(type = 'div',breaks = bd.brk, labels=bd.lab, name="Expected - Modeled \nBiomass (tonnes)",palette = "RdBu") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_BDiff_no_missing_surveys.png"),spatial.Bdiff.plot,base_width = 10,base_height = 10)
# 4 years
spatial.Bdiff.plot<- ggplot() + geom_sf(data=Bdiff.comp.dat.plot %>% dplyr::filter(Year %in% c(2001,2009,2014,2019)),aes(fill=B.diff),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_distiller(type = 'div',breaks = bd.brk, labels=bd.lab, name="Expected - Modeled \nBiomass (tonnes)",palette = "RdBu") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_BDiff_4_years.png"),spatial.Bdiff.plot,base_width = 10,base_height = 7)
# Same plot but the percentage miss by cell.
pb.brk <- pretty(Bdiff.comp.dat.plot$B.per.diff)
pb.lab <- pb.brk#signif(exp(b.brk),digits=2)
spatial.per.Bdiff.plot<- ggplot() + geom_sf(data=Bdiff.comp.dat.plot,aes(fill=B.per.diff),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_distiller(type = 'div',breaks = pb.brk, labels=pb.lab, name="Expected - Modeled \nBiomass (%)",palette = "RdBu") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_per_BDiff.png"),spatial.per.Bdiff.plot,base_width = 10,base_height = 10)
# Remove missing survey years
spatial.per.Bdiff.plot<- ggplot() + geom_sf(data=Bdiff.comp.dat.plot %>% dplyr::filter(!Year %in% c(2015,2020,2023)),aes(fill=B.per.diff),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_distiller(type = 'div',breaks = pb.brk, labels=pb.lab, name="Expected - Modeled \nBiomass (%)",palette = "RdBu") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_per_BDiff_no_missing_surveys.png"),spatial.per.Bdiff.plot,base_width = 10,base_height = 10)
# 4 years
spatial.per.Bdiff.plot<- ggplot() + geom_sf(data=Bdiff.comp.dat.plot %>% dplyr::filter(Year %in% c(2001,2009,2014,2019)),aes(fill=B.per.diff),color='grey')+
facet_wrap(~Year)+
scale_x_continuous(breaks = c(-61.66667,-61), labels = c("61° 40'W","61° W")) +
scale_y_continuous(breaks = c(43.33333, 43.666667, 44),labels = c("43°20'N","43°40'N","44°N")) +
scale_fill_distiller(type = 'div',breaks = pb.brk, labels=pb.lab, name="Expected - Modeled \nBiomass (%)",palette = "RdBu") +
theme(axis.text.x=element_text(angle=-45,hjust=0))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Spatial_per_BDiff_4_years.png"),spatial.per.Bdiff.plot,base_width = 10,base_height = 7)
# Bump the base font size for the comparison line plots.
theme_set(theme_few(base_size = 22))
# And now plot them....
m.comp <- ggplot(nat.mat.plt,aes(x=Year,y=m,group = method,color=method)) + geom_line(linewidth=1.5) +
xlab("") + ylab("Natural Mortality (instantaneous)")+
scale_color_manual(values = c("blue","orange","grey","black")) +
theme(legend.title=element_blank())
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/natural_mortality_comparisons.png"),m.comp,base_height = 6,base_width = 10)
# Remove missing survey years
# Three separate geom_line layers (pre-2015, 2016-2019, 2021-2022) leave visible
# gaps at the missing-survey years rather than interpolating across them.
m.comp <- ggplot(nat.mat.plt %>% dplyr::filter(Year < 2015)) + geom_line(aes(x=Year,y=m,group = method,color=method),linewidth=1.5) +
geom_line(data = nat.mat.plt %>% dplyr::filter(Year %in% 2016:2019), aes(x=Year,y=m,group = method,color=method),linewidth=1.5) +
geom_line(data = nat.mat.plt %>% dplyr::filter(Year %in% 2021:2022), aes(x=Year,y=m,group = method,color=method),linewidth=1.5) +
xlab("") + ylab("Natural Mortality (instantaneous)")+
scale_color_manual(values = c("blue","orange","grey","black")) +
theme(legend.title=element_blank())
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/natural_mortality_comparisons_no_missing_surveys.png"),m.comp,base_height = 6,base_width = 10)
}
# Extract the derived process estimates (biomass, recruitment, natural mortality)
# from the fitted model object.
pred.proc <- get_processes(mod.fit)
# TLM (non-spatial) model: totals already live in log_processes; build 95%
# lognormal confidence intervals as exp(log estimate +/- 1.96 * SE on log scale).
if(mod.select == "TLM")
{
pred.proc$log_processes$year <- c(years,max(years)+1)
pred.proc$log_processes$totB.LCI <- exp(pred.proc$log_processes$log_B - 1.96*pred.proc$log_processes$se_log_B)
pred.proc$log_processes$totB.UCI <- exp(pred.proc$log_processes$log_B + 1.96*pred.proc$log_processes$se_log_B)
pred.proc$log_processes$totR.LCI <- exp(pred.proc$log_processes$log_R - 1.96*pred.proc$log_processes$se_log_R)
pred.proc$log_processes$totR.UCI <- exp(pred.proc$log_processes$log_R + 1.96*pred.proc$log_processes$se_log_R)
pred.proc$log_processes$m.LCI <- exp(pred.proc$log_processes$log_m - 1.96*pred.proc$log_processes$se_log_m)
pred.proc$log_processes$m.UCI <- exp(pred.proc$log_processes$log_m + 1.96*pred.proc$log_processes$se_log_m)
pred.proc$log_processes <- as.data.frame(pred.proc$log_processes)
}
# Spatial (SEBDAM) model: overall totals live in log_tot_frame; copy them into
# log_processes so the downstream plotting code is identical for both model types.
if(mod.select != "TLM")
{
# Get the overall estimates + the 95% CI
pred.proc$log_processes$year <- c(years,max(years)+1)
pred.proc$log_processes$log_B <- pred.proc$log_tot_frame$log_totB
pred.proc$log_processes$log_R <- pred.proc$log_tot_frame$log_totR
pred.proc$log_processes$log_m <- pred.proc$log_tot_frame$log_mean_m
pred.proc$log_processes$totB.LCI <- exp(pred.proc$log_tot_frame$log_totB - 1.96*pred.proc$log_tot_frame$se_log_totB)
pred.proc$log_processes$totB.UCI <- exp(pred.proc$log_tot_frame$log_totB + 1.96*pred.proc$log_tot_frame$se_log_totB)
pred.proc$log_processes$totR.LCI <- exp(pred.proc$log_tot_frame$log_totR - 1.96*pred.proc$log_tot_frame$se_log_totR)
pred.proc$log_processes$totR.UCI <- exp(pred.proc$log_tot_frame$log_totR + 1.96*pred.proc$log_tot_frame$se_log_totR)
pred.proc$log_processes$m.LCI <- exp(pred.proc$log_tot_frame$log_mean_m - 1.96*pred.proc$log_tot_frame$se_log_mean_m)
pred.proc$log_processes$m.UCI <- exp(pred.proc$log_tot_frame$log_mean_m + 1.96*pred.proc$log_tot_frame$se_log_mean_m)
pred.proc$log_processes <- as.data.frame(pred.proc$log_processes)
}
# SEBDAM Version
# Annual exploitation: total catch per year (SEBDAM carries catch per cell, so sum columns).
if(mod.select != "TLM") catch.annual <- data.frame(totC = colSums(catchy), Year = years)
if(mod.select == "TLM") catch.annual <- data.frame(totC = catchy, Year = years)
# Not 1000% sure this is correct at this point. It is noteworth how different this is than the map though...
# Exploitation = Catch / (Biomass + Catch); the final (projection) row of log_B is dropped.
ann.exploit <- data.frame(year = years,B = exp(pred.proc$log_processes$log_B[-nrow(pred.proc$log_processes)]), Catch = catch.annual$totC)
ann.exploit$exploit <- ann.exploit$Catch/(ann.exploit$B+ann.exploit$Catch)
# Drop the one-year-ahead projection row before plotting the time series.
pred.proc$log_processes <- pred.proc$log_processes %>% dplyr::filter(year < 2023)
# Time-series plots of biomass, recruitment, natural mortality, and exploitation,
# first over all years and then with missing-survey years left as visual gaps.
# Biomass time series
bm.ts.plot <- ggplot(pred.proc$log_processes) + geom_line(aes(year,exp(log_B)),color='firebrick2',linewidth=1.5) +
geom_ribbon(aes(ymin=totB.LCI,ymax=totB.UCI,x=year),alpha=0.5,fill='blue',color='blue') + ylim(c(0,1.6e4)) +
xlab("") + ylab("Fully Recruited Biomass (tonnes)") + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Biomass_time_series.png"),bm.ts.plot,base_width = 11,base_height = 8.5)
# Recruit time series
rec.ts.plot <- ggplot(pred.proc$log_processes) + geom_line(aes(year,exp(log_R)),color='firebrick2',linewidth=1.5) +
geom_ribbon(aes(ymin=totR.LCI,ymax=totR.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
xlab("") + ylab("Recruit Biomass (tonnes)") + ylim(c(0,3.5e3)) + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Recruit_time_series.png"),rec.ts.plot,base_width = 11,base_height = 8.5)
# Natural mortality time series...
mort.ts.plot <- ggplot(pred.proc$log_processes) + geom_line(aes(year,exp(log_m)),color='firebrick2',linewidth=1.5) +
geom_ribbon(aes(ymin=m.LCI,ymax=m.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
xlab("") + ylab("Natural mortality (Instantaneous)") + ylim(c(0,2.5)) + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_nat_mort_time_series.png"),mort.ts.plot,base_width = 11,base_height = 8.5)
# Exploitation Rate Time Series
exploit.plot <- ggplot(ann.exploit) + geom_line(aes(x=year,y=exploit),linewidth = 1.5) +
xlab("") + ylab("Exploitation Rate (Proportional)") + ylim(c(0,0.2)) + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_exploit_time_series.png"),exploit.plot,base_width = 11,base_height = 8.5)
# Same plots but removing missing survey years....
# Three layers per series (pre-2015, 2016-2019, 2021-2022) so the lines/ribbons
# break at 2015 and 2020 rather than interpolating across the missing surveys.
bm.ts.plot <- ggplot(pred.proc$log_processes %>% dplyr::filter(year < c(2015))) +
geom_line(aes(year,exp(log_B)),color='firebrick2',linewidth=1.5) + geom_ribbon(aes(ymin=totB.LCI,ymax=totB.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
geom_line(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2016:2019), aes(year,exp(log_B)),color='firebrick2',linewidth=1.5) +
geom_ribbon(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2016:2019), aes(ymin=totB.LCI,ymax=totB.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
geom_line(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2021:2022), aes(year,exp(log_B)),color='firebrick2',linewidth=1.5) +
geom_ribbon(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2021:2022), aes(ymin=totB.LCI,ymax=totB.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
ylim(c(0,1.6e4)) + xlab("") + ylab("Fully Recruited Biomass (tonnes)") + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Biomass_time_series_no_missing_surveys.png"),bm.ts.plot,base_width = 11,base_height = 8.5)
# Recruit time series
rec.ts.plot <- ggplot(pred.proc$log_processes %>% dplyr::filter(year < c(2015))) +
geom_line(aes(year,exp(log_R)),color='firebrick2',linewidth=1.5) + geom_ribbon(aes(ymin=totR.LCI,ymax=totR.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
geom_line(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2016:2019), aes(year,exp(log_R)),color='firebrick2',linewidth=1.5) +
geom_ribbon(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2016:2019), aes(ymin=totR.LCI,ymax=totR.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
geom_line(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2021:2022), aes(year,exp(log_R)),color='firebrick2',linewidth=1.5) +
geom_ribbon(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2021:2022), aes(ymin=totR.LCI,ymax=totR.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
xlab("") + ylab("Recruit Biomass (tonnes)") + ylim(c(0,3.5e3)) + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_Recruit_time_series_no_missing_surveys.png"),rec.ts.plot,base_width = 11,base_height = 8.5)
# Natural mortality time series...
mort.ts.plot <- ggplot(pred.proc$log_processes %>% dplyr::filter(year < c(2015))) +
geom_line(aes(year,exp(log_m)),color='firebrick2',linewidth=1.5) + geom_ribbon(aes(ymin=m.LCI,ymax=m.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
geom_line(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2016:2019), aes(year,exp(log_m)),color='firebrick2',linewidth=1.5) +
geom_ribbon(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2016:2019), aes(ymin=m.LCI,ymax=m.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
geom_line(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2021:2022), aes(year,exp(log_m)),color='firebrick2',linewidth=1.5) +
geom_ribbon(data = pred.proc$log_processes %>% dplyr::filter(year %in% 2021:2022), aes(ymin=m.LCI,ymax=m.UCI,x=year),alpha=0.5,fill='blue',color='blue') +
xlab("") + ylab("Natural mortality (Instantaneous)") + ylim(c(0,1.5)) + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_nat_mort_time_series_no_missing_surveys.png"),mort.ts.plot,base_width = 11,base_height = 8.5)
# Exploitation Rate Time Series
exploit.plot <- ggplot(ann.exploit%>% dplyr::filter(year < c(2015))) + geom_line(aes(x=year,y=exploit),linewidth = 1.5) +
geom_line(data = ann.exploit%>% dplyr::filter(year %in% 2016:2019), aes(x=year,y=exploit),linewidth = 1.5) +
geom_line(data= ann.exploit%>% dplyr::filter(year %in% 2021:2022),aes(x=year,y=exploit),linewidth = 1.5) +
xlab("") + ylab("Exploitation Rate (Proportional)") + ylim(c(0,0.2)) + scale_x_continuous(breaks = seq(1980,2030,by=3))
save_plot(paste0(repo.loc,"Results/Figures/Sab/",mod.select,"_",scenario.select,"/Sab_exploit_time_series_no_missing_surveys.png"),exploit.plot,base_width = 11,base_height = 8.5)
|
#
# Exercise 1
#
# generate numbers
# m = 100000
# choose distributions (normal, poisson, ...)
# compute random numbers with the corresponding r____ function
# compute ( M - E(X) ) / sqrt( Var(X) / m ), the "standardized mean of the empirical mean"
#
makeStuff <- function (n, random_numbers, m.teo, v.teo, name) {
  # Standardize the empirical mean against the theoretical moments:
  # z = (mean(X) - E[X]) / sqrt(Var(X) / n).
  emp.mean <- mean(random_numbers)
  # Empirical variance, computed for inspection; the statistic uses v.teo instead.
  emp.var <- var(random_numbers)
  z <- (emp.mean - m.teo) / sqrt(v.teo / n)
  # Show the sample in a fresh graphics window, titled with the distribution
  # name and the standardized-mean value.
  x11()
  hist(random_numbers, main = c(name, " ", z))
  return(z)
}
myUniform <- function(n){ # n: number of random draws
  # Uniform(0, 1): E[X] = 1/2, Var(X) = 1/12.
  theo.mean <- 0.5
  theo.var <- 1 / 12
  draws <- runif(n)
  result <- makeStuff(n, draws, theo.mean, theo.var, name = "Gleichverteilung")
}
myPoisson <- function(n, lambda){ # n: number of random draws
  # Poisson(lambda): E[X] = Var(X) = lambda, so both theoretical
  # moments passed to makeStuff are simply lambda.
  draws <- rpois(n, lambda)
  makeStuff(n, draws, lambda, lambda, name = "Poisson-Verteilung")
}
myNormal <- function(n){ # n: number of random draws
  # Standard normal: E[X] = 0, Var(X) = 1.
  draws <- rnorm(n)
  makeStuff(n, draws, 0, 1, name = "Standardnormalverteilung")
}
myExponential <- function(n, lambda){ # n: number of random draws
  # Exponential(rate = lambda): E[X] = 1/lambda, Var(X) = 1/lambda^2.
  m.teo = 1 / lambda # theoretical mean
  v.teo = 1 / (lambda^2) # theoretical variance
  # BUG FIX: this previously drew from rnorm(n) (standard normal), which does
  # not match the exponential moments above; draw from the exponential instead.
  random_numbers = rexp(n, rate = lambda)
  makeStuff(n, random_numbers, m.teo, v.teo, name = "Exponentialverteilung")
}
# Large sample size so the standardized empirical mean is approximately N(0, 1)
# for every distribution (central limit theorem).
n = 10000000
myUniform(n)
myPoisson(n, lambda = 3)
myExponential(n, lambda = 3)
myNormal(n)
|
/Exercise-01.R
|
no_license
|
jcalov/HFT-Simulation-Studies-2019
|
R
| false
| false
| 1,807
|
r
|
#
# Exercise 1
#
# generate numbers
# m = 100000
# choose ditributions (normal, poisson, ...)
# berechne zufallszahlen mit r____-Funktion
# Berechne ( M - E(X)) / sqrt( Var(X) / m ), das "standardized mean of empirical mean"
#
makeStuff <- function (n, random_numbers, m.teo, v.teo, name) {
# arithmethisches Mittel
m = mean(random_numbers)
# Varianz
v = var(random_numbers)
# result
result = (m - m.teo) / sqrt(v.teo / n)
# plot histogram
x11()
hist(random_numbers, main = c(name, " ", result))
return(result)
}
myUniform <- function(n){ # m Anzahl Zufallszahlen
m.teo = 0.5 # Theoretischer Erwartungswert
v.teo = 1 / 12 # Theoretische Varianz
# generate random numbers
random_numbers = runif(n)
result = makeStuff(n, random_numbers, m.teo, v.teo, name = "Gleichverteilung")
}
myPoisson <- function(n, lambda){ # m Anzahl Zufallszahlen
m.teo = lambda # Theoretischer Erwartungswert
v.teo = lambda # Theoretische Varianz
# generate random numbers
random_numbers = rpois(n, lambda)
makeStuff(n, random_numbers, m.teo, v.teo, name = "Poisson-Verteilung")
}
myNormal <- function(n){ # n Anzahl Zufallszahlen
m.teo = 0 # Theoretischer Erwartungswert
v.teo = 1 # Theoretische Varianz
# generate random numbers
random_numbers = rnorm(n)
makeStuff(n, random_numbers, m.teo, v.teo, name = "Standardnormalverteilung")
}
myExponential <- function(n, lambda){ # n: number of random draws
  # Exponential(rate = lambda): E[X] = 1/lambda, Var(X) = 1/lambda^2.
  m.teo = 1 / lambda # theoretical mean
  v.teo = 1 / (lambda^2) # theoretical variance
  # BUG FIX: this previously drew from rnorm(n) (standard normal), which does
  # not match the exponential moments above; draw from the exponential instead.
  random_numbers = rexp(n, rate = lambda)
  makeStuff(n, random_numbers, m.teo, v.teo, name = "Exponentialverteilung")
}
n = 10000000
myUniform(n)
myPoisson(n, lambda = 3)
myExponential(n, lambda = 3)
myNormal(n)
|
\name{survival_exp}
\alias{survival_exp}
\title{survival_exp function}
\usage{
survival_exp(inputs)
}
\arguments{
\item{inputs}{inputs for the function including the model
matrix, frailties, fixed effects and the parameters of
the baseline hazard derived from this model}
}
\value{
the survival function for the individual
}
\description{
A function to compute the survival function for an
individual where the baseline hazard comes from an
exponential survival model
}
|
/man/survival_exp.Rd
|
no_license
|
ssouyris/spatsurv
|
R
| false
| false
| 477
|
rd
|
\name{survival_exp}
\alias{survival_exp}
\title{survival_exp function}
\usage{
survival_exp(inputs)
}
\arguments{
\item{inputs}{inputs for the function including the model
matrix, frailties, fixed effects and the parameters of
the baseline hazard derived from this model}
}
\value{
the survival function for the individual
}
\description{
A function to compute the survival function for an
individual where the baseline hazard comes from an
exponential survival model
}
|
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.
# spline.prep - section 10.14 p623
# Function to prepare input for BUGS model when fitting a spline for a covariate
# (introduced in Section 10.14)
spline.prep <- function(cov, nknot = NA){
  # Choose knots and build the fixed- and random-effects design matrices for a
  # penalized spline on `cov` (Crainiceanu et al. 2005; Zuur et al. 2012).
  # If `nknot` is NA the knot count follows the Crainiceanu rule:
  # max(5, min(round(#unique values / 4), 35)).
  # Returns a list with the covariate, the knots, the fixed-effects design
  # matrix X (quadratic polynomial) and the random-effects design matrix Z.
  uniq.vals <- unique(cov)
  k <- if (is.na(nknot)) max(5, min(round(length(uniq.vals) / 4), 35)) else nknot
  # Knots sit at the k interior quantiles of the unique covariate values.
  p.all <- seq(0, 1, length = k + 2)
  knots <- quantile(uniq.vals, probs = p.all[2:(k + 1)])
  # Fixed part: intercept, linear, and quadratic terms.
  n.obs <- length(cov)
  X <- cbind(rep(1, n.obs), cov, cov^2)
  # Random part: cubic radial basis |cov - knot|^3, orthogonalized through the
  # matrix square root of the knot penalty Omega (via SVD).
  Z.raw <- (abs(outer(cov, knots, "-")))^3
  omega <- (abs(outer(knots, knots, "-")))^3
  dec <- svd(omega)
  omega.sqrt <- t(dec$v %*% (t(dec$u) * sqrt(dec$d)))
  Z <- t(solve(omega.sqrt, t(Z.raw)))
  list(cov = cov, knots = knots, X = X, Z = Z)
}
|
/R/spline-prep_10-14_Prepare_input_spline.R
|
no_license
|
guillaumesouchay/AHMbook
|
R
| false
| false
| 1,552
|
r
|
# Functions for the book Applied Hierarchical Modeling in Ecology (AHM)
# Marc Kery & Andy Royle, Academic Press, 2016.
# spline.prep - section 10.14 p623
# Function to prepare input for BUGS model when fitting a spline for a covariate
# (introduced in Section 10.14)
spline.prep <- function(cov, nknot = NA){
# Function chooses knots and creates design matrices for fixed and
# random-effects parts of a spline model for a chosen covariate
# Based on code by Crainiceanu et al. (2005) and Zuur et al. (2012)
# Allows you to choose number of knots or else uses it by the rule
# given in Crainiceanu et al. (2005)
# Prepares fixed part of covariate as a quadratic polynomial
# Determine number and position of knots
# ifelse(is.na(nknot),
# n.knots <- max(5, min(round(length(unique(cov))/4), 35)),
# n.knots <- nknot)
if(is.na(nknot)) {
n.knots <- max(5, min(round(length(unique(cov))/4), 35))
} else {
n.knots <- nknot
}
prob.tmp <- seq(0,1, length = n.knots + 2)
prob <- prob.tmp[-c(1, length(prob.tmp))]
knots <- quantile(unique(cov), probs = prob)
# Create design matrices for fixed and random effects
X <- cbind(rep(1, length(cov)), cov, cov^2) # Fixed-eff DM
Z.tmp <- (abs(outer(cov, knots, "-")))^3
omega.all <- (abs(outer(knots, knots, "-")))^3
svd.omega.all <- svd(omega.all)
sqrt.omega.all <- t(svd.omega.all$v %*% (t(svd.omega.all$u) * sqrt(svd.omega.all$d)))
Z <- t(solve(sqrt.omega.all, t(Z.tmp))) # Rand. eff. DM
# Output
return(list(cov = cov, knots = knots, X = X, Z = Z))
}
|
# KOSPI closing-price forecast with Facebook's prophet package.
# data
# NOTE(review): assumes the object `kospi_20181012` already exists in the
# workspace (e.g. loaded from a saved data file) -- confirm before sourcing.
kospi <- kospi_20181012
rm(kospi_20181012)
# explore
str(kospi)
summary(kospi)
# preparation for prophet::facebook
# prophet expects a data frame with exactly the columns `ds` (date) and `y` (value).
kospi_sub <- subset(kospi, select = c(date, closing_price))
kospi_sub$date <- as.Date(kospi_sub$date)
if(!require(dplyr)){
install.packages("dplyr")
library(dplyr)
}
kospi_prophet <- rename(kospi_sub, ds = date, y = closing_price)
# prophet::facebook
if(!require(prophet)){
install.packages("prophet")
library(prophet)
}
m <- prophet(kospi_prophet, daily.seasonality = TRUE)
# Extend the frame 80 days past the last observation and forecast over it.
future <- make_future_dataframe(m, periods = 80)
tail(future)
forecast <- predict(m, future)
tail(forecast[c('ds', 'yhat','yhat_lower', 'yhat_upper')])
plot(m, forecast)
prophet_plot_components(m, forecast)
|
/prophet_kospi.R
|
permissive
|
hansjang/R_Radar
|
R
| false
| false
| 756
|
r
|
# data
kospi <- kospi_20181012
rm(kospi_20181012)
# explore
str(kospi)
summary(kospi)
# preparation for prophet::facebook
kospi_sub <- subset(kospi, select = c(date, closing_price))
kospi_sub$date <- as.Date(kospi_sub$date)
if(!require(dplyr)){
install.packages("dplyr")
library(dplyr)
}
kospi_prophet <- rename(kospi_sub, ds = date, y = closing_price)
# prophet::facebook
if(!require(prophet)){
install.packages("prophet")
library(prophet)
}
m <- prophet(kospi_prophet, daily.seasonality = TRUE)
future <- make_future_dataframe(m, periods = 80)
tail(future)
forecast <- predict(m, future)
tail(forecast[c('ds', 'yhat','yhat_lower', 'yhat_upper')])
plot(m, forecast)
prophet_plot_components(m, forecast)
|
\name{checkStoppingRule-methods}
\docType{methods}
\alias{checkStoppingRule-methods}
\alias{checkStoppingRule,APlusBSpecifier,CTData,numeric-method}
\alias{checkStoppingRule,CRMSpecifier,CTData,numeric-method}
\alias{checkStoppingRule}
\title{ Methods "checkStoppingRule" }
\description{
checks the stopping rule of a clinical trial, and creates a list of new actions if a clinical
trial is to continue, otherwise adds new data such as conclusions from a clinical trial.
}
\section{Methods}{
\describe{
\item{\code{signature(designSpec = "APlusBSpecifier", currentCTData = "CTData",
currentGlobalTime = "numeric")}}{
checks the stopping rule and, if the trial is to stop, draws the conclusion on the
recommended Phase II dose; otherwise creates a list of two new actions whose executions
trigger the calls to \code{\link{generatePatsBaseChars}} and \code{\link{allocateTrts}}
respectively. }
\item{\code{signature(designSpec = "CRMSpecifier", currentCTData = "CTData",
currentGlobalTime = "numeric")}}{
checks the stopping rule and, if the trial is to stop, draws the conclusion on the
recommended Phase II dose and dose level; otherwise creates a list of two or three (if at
the initial stage of a clinical trial with a two-stage CRM design) new actions. The
executions of the new actions trigger the calls in temporal order to: (1)
\code{\link{checkSwitchingStageRule}} (if in the initial stage of a two-stage CRM trial),
(2) \code{\link{generatePatsBaseChars}}, and (3) \code{\link{allocateTrts}}. }
}}
\keyword{methods}
|
/CTDesignExplorerDevel/man/checkStoppingRule-methods.Rd
|
no_license
|
ishanina/CTDesignExperimenter
|
R
| false
| false
| 1,674
|
rd
|
\name{checkStoppingRule-methods}
\docType{methods}
\alias{checkStoppingRule-methods}
\alias{checkStoppingRule,APlusBSpecifier,CTData,numeric-method}
\alias{checkStoppingRule,CRMSpecifier,CTData,numeric-method}
\alias{checkStoppingRule}
\title{ Methods "checkStoppingRule" }
\description{
checks the stopping rule of a clinical trial, and creates a list of new actions if a clinical
trial is to continue, otherwise adds new data such as conclusions from a clinical trial.
}
\section{Methods}{
\describe{
\item{\code{signature(designSpec = "APlusBSpecifier", currentCTData = "CTData",
currentGlobalTime = "numeric")}}{
checks the stopping rule, and if the trial is to stop,draws the conclusion on the
recommended Phase II dose,otherwise creates a list of two new actions whose executions
trigger the calls to \code{link{generatePatsBaseChars}} and \code{link{allocateTrts}}
respectively. }
\item{\code{signature(designSpec = "CRMSpecifier", currentCTData = "CTData",
currentGlobalTime = "numeric")}}{
checks the stopping rule, and if the trial is to stop,draws the conclusion on the
recommended Phase II dose and dose level ,otherwise creates a list of two or three (if at
the initial stage of a clinical trial with two-stage CRM design) new actions. The
executions of the new actions trigger the calls in temporal order to: (1)
\code{\link{checkSwitchingStageRule}}(if in the initial stage of a two-stage CRM trial),
(2)\code{link{generatePatsBaseChars}},and (3)\code{link{allocateTrts}}. }
}}
\keyword{methods}
|
#Quiz3
setwd("/media/michael/PATRIOT/Self-Education/John Hopkins Data Science Specialization/3. Getting and Cleaning Data/week3/quiz")
# Question 1
# Download the ACS housing survey and flag households with ACR == 3 and AGS == 6.
if(!file.exists('data')) {
dir.create('data')
}
fileUrl <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv'
codeBookUrl <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FPUMSDataDict06.pdf'
download.file(fileUrl, destfile = 'data/housing.csv', method = 'curl') # do not use method = 'curl' on Windows
download.file(codeBookUrl, destfile = 'data/housingCodeBook.pdf', method = 'curl')
dataDownloaded1 <- date()
data <- read.csv('data/housing.csv')
agricultureLogical <- data$ACR == 3 & data$AGS == 6
which(agricultureLogical)
# Question 2
# Read a JPEG in native raster form and take the 30th/80th percentiles of the pixel values.
library(jpeg)
if(!file.exists('data')) {
dir.create('data')
}
fileUrl <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg'
download.file(fileUrl, destfile = 'data/picture.jpg', method = 'curl')
dateDownloaded2 <- date()
nativeRasterIMG <- readJPEG('data//picture.jpg', native = TRUE)
quantile(nativeRasterIMG, c(.30, .80))
# Question 3
# Merge World Bank GDP rankings with the education statistics country table.
if(!file.exists('data')) {
dir.create('data')
}
GPD_url <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv'
EDU_url <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv'
download.file(GPD_url, destfile = 'data/gdp.csv', method = 'curl')
download.file(EDU_url, destfile = 'data/edu.csv', method = 'curl')
#gdpData requires cleaning: header junk rows, blank/'..' placeholders, footer notes
gdpData <- read.csv('data/gdp.csv')
str(gdpData)
gdpData <- read.csv('data/gdp.csv', header = FALSE, stringsAsFactors = FALSE)
str(gdpData)
head(gdpData)
tail(gdpData)
gdpData <- gdpData[,c(1,2,4,5)]
names(gdpData) <- c('CountryCode', 'ranking', 'country', 'USD(millions)')
gdpData[gdpData == '' | gdpData == '..'] <- NA
gdpData <- gdpData[rowSums(is.na(gdpData)) != 4,]
head(gdpData)
tail(gdpData)
dim(gdpData)
gdpData <- gdpData[4:231,]
gdpData <- gdpData[!is.na(gdpData$ranking),]
eduData <- read.csv('data/edu.csv')
mergedData <- merge(gdpData, eduData, by = 'CountryCode') # inner join on country code
length(mergedData$CountryCode)
mergedData$ranking <- as.numeric(mergedData$ranking)
mergedData[order(mergedData$ranking, decreasing = TRUE), ][13, ]
## Alternative streamlined solution to Question 3
library(dplyr)
# colClasses drops unused columns at read time ('NULL' skips a column).
colClasses <- c('character', 'character', 'NULL', 'character', 'character', 'character', 'NULL', 'NULL', 'NULL', 'NULL')
gdpData <- read.csv('data/gdp.csv', header = FALSE, colClasses = colClasses, skip = 5, na.strings = c('', '..'))
names(gdpData) <- c('CountryCode', 'ranking', 'country', 'USDmillions', 'X')
gdpData <- filter(gdpData, !is.na(CountryCode))
eduData <- read.csv('data/edu.csv')
mergedData <- merge(gdpData, eduData, by = 'CountryCode')
mergedData <- mergedData %>% filter(!is.na(ranking)) %>% mutate(ranking = as.integer(ranking), USDmillions = as.numeric(gsub(",","", USDmillions))) %>% arrange(desc(ranking))
mergedData$ranking
nrow(mergedData)
mergedData[13,]$country
#may have wanted to remove unused columns
# Question 4
# Average GDP ranking within each income group, shown two equivalent ways.
group_by(mergedData, Income.Group) %>% summarize(mean(ranking))
tapply(mergedData$ranking, mergedData$Income.Group, mean)
# Question 5
# Cut rankings into 5 quantile groups and cross-tabulate against income group.
library(Hmisc)
mergedData$ranking.Group <- cut2(mergedData$ranking, g = 5)
table(mergedData$ranking.Group, mergedData$Income.Group)
|
/3. Getting and Cleaning Data/week3/quiz/quiz3.R
|
no_license
|
MichaelDS/Data-Science-Coursera
|
R
| false
| false
| 3,273
|
r
|
#Quiz3
# Getting and Cleaning Data, week 3 quiz script: downloads the course data
# sets and computes the answer for each question in sequence.
# NOTE(review): setwd() to a machine-specific path makes this script
# non-portable; it only runs as-is on the author's machine.
setwd("/media/michael/PATRIOT/Self-Education/John Hopkins Data Science Specialization/3. Getting and Cleaning Data/week3/quiz")
# Question 1
if(!file.exists('data')) {
  dir.create('data')
}
fileUrl <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv'
codeBookUrl <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FPUMSDataDict06.pdf'
download.file(fileUrl, destfile = 'data/housing.csv', method = 'curl') # do not use method = 'curl' on Windows
download.file(codeBookUrl, destfile = 'data/housingCodeBook.pdf', method = 'curl')
dataDownloaded1 <- date()
data <- read.csv('data/housing.csv')
# Households on >10 acres (ACR == 3) that sold > $10k of agriculture (AGS == 6).
agricultureLogical <- data$ACR == 3 & data$AGS == 6
which(agricultureLogical)
# Question 2
library(jpeg)
if(!file.exists('data')) {
  dir.create('data')
}
fileUrl <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg'
download.file(fileUrl, destfile = 'data/picture.jpg', method = 'curl')
dateDownloaded2 <- date()
# native = TRUE yields an integer raster so quantiles are over pixel values.
nativeRasterIMG <- readJPEG('data//picture.jpg', native = TRUE)
quantile(nativeRasterIMG, c(.30, .80))
# Question 3
if(!file.exists('data')) {
  dir.create('data')
}
GPD_url <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv'
EDU_url <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv'
download.file(GPD_url, destfile = 'data/gdp.csv', method = 'curl')
download.file(EDU_url, destfile = 'data/edu.csv', method = 'curl')
#gdpData requires cleaning
# Exploratory read shows junk header rows; re-read without a header below.
gdpData <- read.csv('data/gdp.csv')
str(gdpData)
gdpData <- read.csv('data/gdp.csv', header = FALSE, stringsAsFactors = FALSE)
str(gdpData)
head(gdpData)
tail(gdpData)
# Keep country code, ranking, country name, and GDP (USD millions) columns.
gdpData <- gdpData[,c(1,2,4,5)]
names(gdpData) <- c('CountryCode', 'ranking', 'country', 'USD(millions)')
# The file encodes missing values as '' or '..'; recode them as NA.
gdpData[gdpData == '' | gdpData == '..'] <- NA
# Drop rows that are entirely NA across the 4 retained columns.
gdpData <- gdpData[rowSums(is.na(gdpData)) != 4,]
head(gdpData)
tail(gdpData)
dim(gdpData)
# Rows 4:231 hold the country records; earlier rows are header junk.
gdpData <- gdpData[4:231,]
gdpData <- gdpData[!is.na(gdpData$ranking),]
eduData <- read.csv('data/edu.csv')
mergedData <- merge(gdpData, eduData, by = 'CountryCode') #
length(mergedData$CountryCode)
# ranking was read as character; convert before ordering.
mergedData$ranking <- as.numeric(mergedData$ranking)
# 13th country when sorted by ranking in decreasing order (quiz answer).
mergedData[order(mergedData$ranking, decreasing = TRUE), ][13, ]
## Alternative streamlined solution to Question 3
library(dplyr)
# colClasses = 'NULL' drops unused columns at read time; skip = 5 skips the
# junk header; na.strings recodes the missing-value placeholders on read.
colClasses <- c('character', 'character', 'NULL', 'character', 'character', 'character', 'NULL', 'NULL', 'NULL', 'NULL')
gdpData <- read.csv('data/gdp.csv', header = FALSE, colClasses = colClasses, skip = 5, na.strings = c('', '..'))
names(gdpData) <- c('CountryCode', 'ranking', 'country', 'USDmillions', 'X')
gdpData <- filter(gdpData, !is.na(CountryCode))
eduData <- read.csv('data/edu.csv')
mergedData <- merge(gdpData, eduData, by = 'CountryCode')
# Strip thousands separators from GDP before numeric conversion.
mergedData <- mergedData %>% filter(!is.na(ranking)) %>% mutate(ranking = as.integer(ranking), USDmillions = as.numeric(gsub(",","", USDmillions))) %>% arrange(desc(ranking))
mergedData$ranking
nrow(mergedData)
mergedData[13,]$country
#may have wanted to remove unused columns
# Question 4
# Mean GDP ranking per income group: dplyr variant and base-R tapply variant.
group_by(mergedData, Income.Group) %>% summarize(mean(ranking))
tapply(mergedData$ranking, mergedData$Income.Group, mean)
# Question 5
library(Hmisc)
# Cut rankings into 5 quantile groups and cross-tabulate with income group.
mergedData$ranking.Group <- cut2(mergedData$ranking, g = 5)
table(mergedData$ranking.Group, mergedData$Income.Group)
|
#Ensemble of Gene Set Enrichment Analyses
#
# Author: Monther Alhamdoosh, E:m.hamdoosh@gmail.com
###############################################################################
# Write one GSEA method's results for a contrast as both a sortable HTML
# table and a tab-delimited text file.
#
# contrast.name: contrast label, embedded in the page title.
# gsa.results:   matrix or data frame of per-gene-set statistics; row names
#                are gene set names.
# gs.annot:      gene set annotation object; slots @name, @label and @anno
#                (data frame with a "GeneSet" column) are used.
# method:        GSEA method name, embedded in the page title.
# file.name:     path of the .txt output; the HTML file is written alongside
#                it with the extension swapped to .html.
writeResultsToHTML <- function(contrast.name, gsa.results, gs.annot, method,
        file.name){
    title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using ", method, " (",
                    contrast.name, ")")
    path = strsplit(file.name, "/")[[1]]
    file = path[length(path)]
    # fixed = TRUE: ".txt" is a literal suffix here, not a regex (an
    # unescaped "." would match any character).
    file = gsub(".txt", ".html", file, fixed = TRUE)
    # Directory part of file.name. seq_len() makes the no-directory case
    # explicit; the previous `1:length(path) -1` only worked because R
    # silently drops a 0 index.
    path = paste0(paste(path[seq_len(length(path) - 1)], collapse = "/"), "/")
    if (!is.data.frame(gsa.results)){
        # Coerce a matrix to a data frame of numeric columns, keeping
        # the row names intact.
        rows = row.names(gsa.results)
        gsa.results = data.frame(gsa.results)
        gsa.results = lapply(gsa.results, as.numeric)
        gsa.results = as.data.frame(gsa.results)
        row.names(gsa.results) = rows
    }
    # Align annotation rows with the result rows by gene set name.
    annot.frame = gs.annot@anno[match(rownames(gsa.results),
                    gs.annot@anno[,"GeneSet"]),]
    if ("BroadUrl" %in% colnames(annot.frame)){
        annot.frame[, "BroadUrl"] = hmakeTag("a", "Go to Broad",
                href=annot.frame[, "BroadUrl"])
    }
    if (length(grep("^gsdbgo", gs.annot@label)) == 1){
        # GeneSetDB GO collections link out to AmiGO via their GO ID.
        annot.frame[, "SourceDB"] = "Go to AmiGO"
        annot.frame[, "SourceURL"] = paste0("http://amigo.geneontology.org/amigo/term/",
                annot.frame[, "GOID"])
    }
    if ("SourceURL" %in% colnames(annot.frame)){
        # Turn SourceDB into a hyperlink, then drop the raw URL column.
        annot.frame[, "SourceDB"] = hmakeTag("a", annot.frame[,
                        "SourceDB"], href=annot.frame[, "SourceURL"])
        annot.frame = annot.frame[, "SourceURL" !=
                        colnames(annot.frame)]
    }
    table.data = data.frame(annot.frame, gsa.results)
    if ("Rank" %in% colnames(table.data) ){
        table.data[, "Rank"] = as.numeric(table.data[, "Rank"])
    }
    # p-values are rendered as character so the HTML table shows them at
    # full precision.
    if ("p.value" %in% colnames(table.data)){
        table.data[, "p.value"] = as.character(table.data[, "p.value"])
    }
    if ("p.adj" %in% colnames(table.data)){
        table.data[, "p.adj"] = as.character(table.data[, "p.adj"])
    }
    capture.output(HTMLsortedTable(table.data, title, title, file=file, path=path))
    # Rebuild the annotation frame without the injected HTML so the text
    # output stays machine-readable.
    annot.frame = gs.annot@anno[match(rownames(gsa.results),
                    gs.annot@anno[,"GeneSet"]),]
    # seq_len() (not seq(1, n)) so a zero-row result yields an empty Rank
    # column instead of c(1, 0).
    table.data = data.frame(Rank=as.numeric(seq_len(nrow(gsa.results))),
            annot.frame,
            gsa.results)
    write.table(table.data,
            file=file.name,
            sep="\t",
            quote=FALSE,
            row.names=FALSE)
}
# Write combined EGSEA results for a contrast (or a multi-contrast
# comparison) as a styled HTML table plus a tab-delimited text file.
# Links each gene set to its heatmap (and KEGG pathway view, when the
# collection is a KEGG one); the link columns are stripped from the text
# output.
#
# contrast.name: contrast label, embedded in the page title.
# gsa.results:   matrix or data frame of per-gene-set statistics; row names
#                are gene set names.
# gs.annot:      gene set annotation object; slots @name, @label and @anno
#                are used.
# comparison:    TRUE when the results come from the comparison analysis,
#                which uses the ".multi" file-name layout.
# interactive:   TRUE writes an interactive DT::datatable widget; FALSE
#                writes a static sorted HTML table.
writeEGSEAResultsToHTML <- function(contrast.name, gsa.results, gs.annot,
        file.name, comparison=FALSE, interactive=TRUE){
    title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (",
                    contrast.name, ")")
    # Split file.name into directory part and bare file name; the HTML page
    # replaces the .txt extension.
    path = strsplit(file.name, "/")[[1]]
    file = path[length(path)]
    file = gsub(".txt", ".html", file)
    path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
    if (!is.data.frame(gsa.results)){
        # Coerce a matrix to a data frame of numeric columns, keeping row names.
        rows = row.names(gsa.results)
        gsa.results = data.frame(gsa.results)
        gsa.results = lapply(gsa.results, as.numeric)
        gsa.results = as.data.frame(gsa.results)
        row.names(gsa.results) = rows
    }
    # Gene set IDs drive the heatmap/pathway file names.
    ids = gs.annot@anno[match(rownames(gsa.results), gs.annot@anno[,2]),
            "ID"]
    if (!comparison){
        # Per-contrast layout: files live in a subdirectory named after
        # the contrast (with " - " collapsed to "-").
        heatmaps = paste0("../hm-top-gs-", gs.annot@label, "/",
                sub(" - ", "-", contrast.name), "/", ids,
                ".heatmap.pdf")
        csvfiles = paste0("../hm-top-gs-", gs.annot@label, "/",
                sub(" - ", "-", contrast.name), "/", ids,
                ".heatmap.csv")
    }
    else{
        # Comparison layout: flat directory, ".multi" suffix.
        heatmaps = paste0("../hm-top-gs-", gs.annot@label, "/", ids,
                ".heatmap.multi.pdf")
        csvfiles = paste0("../hm-top-gs-", gs.annot@label, "/", ids,
                ".heatmap.multi.csv")
    }
    heatmaps = hmakeTag("a", "Show Map", href=heatmaps)
    csvfiles = hmakeTag("a", "Interpret Results", href=csvfiles)
    heatmaps = paste(heatmaps, csvfiles)
    if (length(grep("^kegg", gs.annot@label)) == 0){
        # Non-KEGG collection: table has a Heatmaps link column only.
        table.data = data.frame(Rank=as.numeric(seq(1,
                                nrow(gsa.results))),
                Heatmaps=heatmaps,
                gs.annot@anno[match(rownames(gsa.results),
                                gs.annot@anno[,"GeneSet"]),],
                gsa.results)
        # Text output drops the HTML link column.
        write.table(table.data[, -c(which(colnames(table.data) ==
                                        "Heatmaps"))],
                file=file.name,
                sep="\t",
                quote=FALSE,
                row.names=FALSE)
    }
    else{
        # KEGG collection: also link each set to its pathview image.
        if (!comparison){
            pathways = paste0("../pv-top-gs-", gs.annot@label, "/",
                    sub(" - ", "-", contrast.name), "/",
                    ids, ".pathview.png")
        } else{
            pathways = paste0("../pv-top-gs-", gs.annot@label, "/",
                    ids, ".pathview.multi.png")
        }
        pathways = hmakeTag("a", "Show Pathway", href=pathways)
        pathways = paste(pathways, csvfiles)
        table.data = data.frame(Rank=as.numeric(seq(1,
                                nrow(gsa.results))),
                Heatmaps=heatmaps,
                Pathways=pathways,
                gs.annot@anno[match(rownames(gsa.results),
                                gs.annot@anno[,"GeneSet"]),],
                gsa.results)
        # Text output drops both HTML link columns.
        write.table(table.data[, -c(which(colnames(table.data) ==
                                        "Heatmaps"), which(colnames(table.data) == "Pathways"))],
                file=file.name,
                sep="\t",
                quote=FALSE,
                row.names=FALSE)
    }
    # Round numeric columns for display; p-value-like columns keep more
    # digits, and only in the static (non-interactive) table.
    for (i in 1:ncol(table.data)){
        if (is.numeric(table.data[, i])){
            if (!anyOccur(colnames(table.data)[i], c("p.value",
                                    "p.adj", "min.pvalue")))
                table.data[, i] = round(table.data[,i], 2)
            else if (! interactive)
                table.data[, i] = round(table.data[,i], 6)
        }
    }
    if ("BroadUrl" %in% colnames(table.data)){
        table.data[, "BroadUrl"] = hmakeTag("a", "Go to Broad",
                href=table.data[, "BroadUrl"],
                'target'='_blank')
    }
    if (length(grep("^gsdbgo", gs.annot@label)) == 1){
        # GeneSetDB GO collections link out to AmiGO via their GO ID.
        table.data[, "SourceDB"] = "Go to AmiGO"
        table.data[, "SourceURL"] = paste0("http://amigo.geneontology.org/amigo/term/",
                table.data[, "GOID"])
    }
    if ("SourceURL" %in% colnames(table.data)){
        # Turn SourceDB into a hyperlink, then drop the raw URL column.
        table.data[, "SourceDB"] = hmakeTag("a", table.data[,
                        "SourceDB"], href=table.data[, "SourceURL"])
        table.data = table.data[, "SourceURL" != colnames(table.data)]
    }
    if ("GeneSet" %in% colnames(table.data)){
        table.data[, "GeneSet"] = gsub("_", " ", table.data[,
                        "GeneSet"])
    }
    if ("Description" %in% colnames(table.data)){
        # Truncate long descriptions to 50 chars with the full text in a
        # hover tooltip.
        desc = c()
        for ( t in table.data[, "Description"]){
            l = stri_length(t)
            if (is.na(l) || l <= 50)
                desc = c(desc, t)
            else
                desc = c(desc, paste0("<span title='", t,"'>",
                                substr(t, 1, 50), " ... </span>"))
        }
        table.data[, "Description"] = desc
    }
    if ("direction" %in% colnames(table.data)){
        # Map the numeric direction statistic to Up/Down/Neutral labels.
        table.data[, "direction"] =
                as.character(lapply(as.numeric(table.data[, "direction"]),
                                function(x) if (x > 0) "Up" else if (x < 0)
                                            "Down" else "Neutral"))
    }
    if ("Rank" %in% colnames(table.data) ){
        table.data[, "Rank"] = as.numeric(table.data[, "Rank"])
    }
    # p-values rendered as character to keep full precision in the HTML.
    if ("p.value" %in% colnames(table.data)){
        table.data[, "p.value"] = as.character(table.data[, "p.value"])
    }
    if ("p.adj" %in% colnames(table.data)){
        table.data[, "p.adj"] = as.character(table.data[, "p.adj"])
    }
    # NOTE(review): this unconditional HTMLsortedTable call is repeated in
    # the non-interactive branch below, so the static table appears to be
    # generated twice when interactive = FALSE — confirm whether the first
    # call is redundant.
    capture.output(HTMLsortedTable(table.data, title, title, file=file, path=path))
    if(interactive){
        saveWidget(widget=datatable(table.data,escape = FALSE),
                selfcontained=FALSE, libdir=file.path(path,"lib"),
                file=file.path(path,file)) # DT::datatable
    }else{
        capture.output(HTMLsortedTable(table.data, title, title, file=file, path=path))
    }
}
# Return TRUE when `string` matches at least one of the patterns in `list`
# (patterns carry grep()'s regex semantics; an empty pattern list gives FALSE).
anyOccur <- function(string, list){
    hits = vapply(list,
            function(pattern) length(grep(pattern, string)) > 0,
            logical(1))
    return(any(hits))
}
# Build the HTML summary page for the multi-contrast comparison analysis:
# pairwise summary plots (rank- and direction-based), a summary heatmap,
# and a bar plot, each linked to its PDF or interactive HTML version.
# NOTE: the dot in the name is plain naming, not S3 dispatch.
#
# contrast.names: labels of the contrasts being compared.
# gs.annot:       gene set annotation object; slots @name and @label used.
# sum.plot.axis:  statistic used for the summary-plot axes (file-name part).
# sort.by:        statistic the heatmap/bar plot were sorted by (file-name part).
# file.name:      output .txt path; the page is written alongside as .html.
# interactive:    TRUE links interactive HTML plots, FALSE links PDFs.
generateSummaryPage.comparison <- function(contrast.names, gs.annot,
        sum.plot.axis, sort.by, file.name, interactive=TRUE){
    # Derive the output directory and the .html file name from file.name.
    path = strsplit(file.name, "/")[[1]]
    file = path[length(path)]
    file = gsub(".txt", ".html", file)
    path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
    title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (Comparison Analysis)")
    if (length(contrast.names) == 2 ){
        # Exactly two contrasts: a single pair of summary plots.
        img.files = paste0("./", gs.annot@label, "-summary-",
                sum.plot.axis, c("-rank", "-dir"),".png")
        plot.titles = c("Summary plot based on gene set rank and size",
                "Summary plot based on regulation direction and significance")
    }
    else if (length(contrast.names) > 2){
        # More than two contrasts: one plot pair per contrast pair, plus a
        # navigation table of in-page anchors ("anch<i><j>") linking to
        # each pair's plots.
        img.files = c()
        plot.titles = c()
        anchor.names = c()
        anchors.table = matrix("-", length(contrast.names),
                length(contrast.names))
        colnames(anchors.table) = contrast.names
        rownames(anchors.table) = contrast.names
        for (i in 1:(length(contrast.names)-1)){
            for (j in (i+1):length(contrast.names)){
                # Two anchors per pair: one for each of the two plots.
                anchor.names = c(anchor.names, c(paste0('anch',
                                        i,j),paste0('anch', i,j)))
                # The table is symmetric: both [i,j] and [j,i] jump to
                # the same anchor.
                anchors.table[i, j] = hmakeTag("a", "View",
                        style="text-decoration: none",
                        href=paste0("#", 'anch', i,j))
                anchors.table[j, i] = hmakeTag("a", "View",
                        style="text-decoration: none",
                        href=paste0("#", 'anch', i,j))
                img.files = c(img.files, paste0("./",
                                gs.annot@label, paste0('-',
                                        i,j), "-summary-",
                                sum.plot.axis, c("-rank", "-dir"),
                                ".png"))
                titles = c("Summary plot based on gene set rank
and size <br/>",
                        "Summary plot based on
regulation direction and significance <br/>")
                titles = paste0(titles, contrast.names[i]," |
", contrast.names[j])
                plot.titles = c(plot.titles, titles)
            }
        }
    }
    else{
        # Fewer than two contrasts: no pairwise summary plots.
        img.files = c()
        plot.titles = c()
    }
    # add summary heatmaps if more than 2 contrasts to the comparison analysis page
    if (length(contrast.names) >= 2){
        file.name = paste0("./",gs.annot@label,
                "-summary-heatmap-", sort.by,".png")
        img.files = c(img.files, file.name)
        fig.title = paste0("Summary heatmap of the top gene sets (",
                hmakeTag("a", "Interpret Results",
                        href=sub(".png", ".csv", file.name)), ")")
        plot.titles = c(plot.titles, fig.title)
    }
    if(interactive){
        html.img.files = sub(".png", ".html", img.files)
        pdfs = hmakeTag("a", "Open interactive plot", href=html.img.files)
        # actually they are html files. but for convention we still call it pdf
        pdf.files = html.img.files
    }else{
        pdf.img.files = sub(".png", ".pdf", img.files)
        pdfs = hmakeTag("a", "Download pdf file", href=pdf.img.files)
        pdf.files = pdf.img.files
    }
    # Append the bar plot of the top gene sets.
    file.name.bar = paste0("./",
            "comparison-", gs.annot@label, "-bar-plot-",
            sort.by ,".png")
    img.files = c(img.files, file.name.bar)
    plot.titles = c(plot.titles, "Bar plot of the top gene sets")
    pdf.files = c(pdf.files, sub(".png", ".pdf", file.name.bar))
    # Each cell: the thumbnail (linked to the full file) over its caption
    # and a download/open link.
    images = hmakeTag("a", hmakeTag("img", src=img.files, width=500),
            href=pdf.files)
    content = paste(images, plot.titles, pdfs, sep="<br/>")
    if (length(contrast.names) > 2){
        # Prepend the invisible anchor tags the navigation table targets.
        anchors = hmakeTag("a", "", name=anchor.names)
        content = paste(anchors, content, sep="")
    }
    if (length(content) %% 2 != 0){
        content = c(content, "")# to make sure there are multiple of 2s
    }
    # Lay the cells out two per row.
    content = matrix(content, ncol=2, byrow = TRUE)
    p = openPage(file, dirname=path, title=title)
    hwrite(title, heading=1, br=TRUE, page=p)
    if (length(contrast.names) > 2){
        hwrite(anchors.table, align="center", border=1, width="1000px",
                cellspacing=0, row.bgcolor='#ffffaa',
                row.style=list('font-weight:bold'),
                col.bgcolor='#ffffaa',
                col.style=list('font-weight:bold'),
                page=p)
    }
    hwrite(content, align="center", border=0, page=p)
    closePage(p)
}
# Build the HTML page of top-GO-term graphs (BP/MF/CC) for the comparison
# analysis. Falls back to a "could not be generated" notice when none of
# the expected PNGs exist. NOTE: the dot in the name is plain naming, not
# S3 dispatch.
#
# contrast.names: contrast labels (only used implicitly via the title).
# gs.annot:       gene set annotation object; slots @name and @label used.
# sort.by:        statistic the graphs were sorted by (file-name part).
# file.name:      output .txt path; the page is written alongside as .html.
generateAllGOgraphsPage.comparison <- function(contrast.names, gs.annot,
        sort.by, file.name){
    # Derive output directory and .html file name from file.name.
    path = strsplit(file.name, "/")[[1]]
    file = path[length(path)]
    file = gsub(".txt", ".html", file)
    path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
    title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (Comparison Analysis)")
    # One expected PNG per GO ontology (Biological Process, Molecular
    # Function, Cellular Component); keep only those that exist on disk.
    check.files = file.exists(paste0(path,
                    "comparison-", gs.annot@label, "-top-",
                    sort.by, "-", c("BP", "MF", "CC"), ".png"))
    if (sum(check.files) == 0 )
        content = hmakeTag("h3", "GO graphs could not be generated!")
    else{
        img.files = paste0("./comparison-", gs.annot@label, "-top-",
                sort.by, "-", c("BP", "MF", "CC"), ".png")[check.files]
        plot.titles =c("Top GO Biological Processes ", "Top GO Molecular Functions",
                "Top GO Cellular Components ")[check.files]
        pdf.files = sub(".png", ".pdf", img.files)
        # Each cell: thumbnail linked to the PDF, over caption and link.
        images = hmakeTag("a", hmakeTag("img", src=img.files, width=500),
                href=pdf.files)
        pdfs = hmakeTag("a", "Download pdf file", href=pdf.files)
        content = paste(images, plot.titles, pdfs, sep="<br/>")
        if (length(content) %% 2 != 0){
            content = c(content, "")# to make sure there are multiple of 2s
        }
        # Lay the cells out two per row.
        content = matrix(content, ncol=2, byrow = TRUE)
    }
    p = openPage(file, dirname=path, title=title)
    hwrite(title, heading=1, br=TRUE, page=p)
    hwrite(content, align="center", border=0, page=p)
    closePage(p)
}
# Build the per-contrast HTML page of top-GO-term graphs (BP/MF/CC).
# Falls back to a "could not be generated" notice when none of the
# expected PNGs exist.
#
# contrast.name: contrast label, used in the title and in file names.
# gs.annot:      gene set annotation object; slots @name and @label used.
# sort.by:       statistic the graphs were sorted by (file-name part).
# file.name:     output .txt path; the page is written alongside as .html.
generateAllGOgraphsPage <- function(contrast.name, gs.annot, sort.by,
        file.name){
    title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (",
                    contrast.name, ")")
    # Derive the output directory (normalized to an absolute path here,
    # unlike the sibling page generators) and the .html file name.
    path = strsplit(file.name, "/")[[1]]
    file = path[length(path)]
    file = gsub(".txt", ".html", file)
    path = paste0(normalizePath(paste(path[1:length(path) -1], collapse =
                                    "/")), "/")
    # One candidate PNG per GO ontology; collect only those present.
    tmp = c("BP", "MF", "CC")
    img.files = c()
    plot.titles = c()
    cat.names = c("Top GO Biological Process.", "Top GO Molecular
Functions.",
            "Top GO Cellular Components.")
    names(cat.names) = tmp
    for (cat in tmp){
        f= paste0(sub(" - ", "-", contrast.name), "-", gs.annot@label,
                "-top-", sort.by, "-", cat, ".png")
        if (file.exists(paste0(path, f))){
            img.files = c(img.files, f)
            plot.titles = c(plot.titles, cat.names[cat])
        }
    }
    if (length(img.files) > 0){
        img.files = paste0("./", img.files)
        pdf.files = sub(".png", ".pdf", img.files)
        # Each cell: thumbnail linked to the PDF, over caption and link.
        images = hmakeTag("a", hmakeTag("img", src=img.files,
                        width=600), href=pdf.files)
        pdfs = hmakeTag("a", "Download pdf file", href=pdf.files)
        content = paste(images, plot.titles, pdfs, sep="<br/>")
        if (length(content) %% 2 != 0){
            content = c(content, "")# to make sure there are
            # multiple of 2s
        }
        # Lay the cells out two per row.
        content = matrix(content, ncol=2, byrow = TRUE)
    }
    else
        content = hmakeTag("h3", "GO graphs could not be generated!")
    p = openPage(file, dirname=path, title=title)
    hwrite(title, heading=1, br=TRUE, page=p)
    hwrite(content, align="center", border=0, page=p)
    closePage(p)
}
# Build the per-contrast HTML summary page: rank- and direction-based
# summary plots, an optional MDS plot of base-method rankings, an optional
# summary heatmap (single-contrast analyses only), and a bar plot — each
# linked to its PDF or interactive HTML version.
#
# contrast.name: contrast label, used in the title and in file names.
# gs.annot:      gene set annotation object; slots @name and @label used.
# sum.plot.axis: statistic used for the summary-plot axes (file-name part).
# sort.by:       statistic the heatmap/bar plot were sorted by.
# contr.num:     total number of contrasts in the analysis; the summary
#                heatmap goes on this page only when it is 1.
# file.name:     output .txt path; the page is written alongside as .html.
# interactive:   TRUE links interactive HTML plots, FALSE links PDFs.
generateSummaryPage <- function(contrast.name, gs.annot, sum.plot.axis,
        sort.by, contr.num, file.name, interactive=TRUE){
    title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (",
                    contrast.name, ")")
    # The two core summary plots (rank-based and direction-based).
    img.files = paste0("./", sub(" - ", "-", contrast.name), "-",
            gs.annot@label, "-summary-", sum.plot.axis ,"-rank.png")
    img.files = c(img.files, paste0("./", sub(" - ", "-", contrast.name),
                    "-", gs.annot@label, "-summary-", sum.plot.axis,"-dir.png"))
    # Directory part of file.name (needed early for the MDS existence check).
    path = strsplit(file.name, "/")[[1]]
    path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
    mds.file = paste0(sub(" - ", "-", contrast.name), "-", gs.annot@label,
            "-methods.png")
    if(interactive){
        # Interactive mode links HTML widgets; variable still named
        # "pdf.files" for consistency with the static branch.
        pdf.files = sub(".png", ".html", img.files)
        pdfs = hmakeTag("a", "Open interactive plot", href=pdf.files)
    }else{
        pdf.files = sub(".png", ".pdf", img.files)
        pdfs = hmakeTag("a", "Download pdf file", href=pdf.files)
    }
    # Optional MDS plot of the base GSEA methods' rankings.
    if (file.exists(paste0(path, mds.file))){
        img.files = c(img.files, paste0("./", mds.file))
        ta = hmakeTag("a", "Download pdf file", href=sub(".png", ".pdf", paste0("./", mds.file)))
        pdfs = c(pdfs, ta)
        pdf.files = c(pdf.files, sub(".png", ".pdf", paste0("./", mds.file)))
    }
    # add summary heatmap to the contrast summary page if no. contrasts = 1
    if (contr.num == 1){
        file.name.sum = paste0("./", gs.annot@label,
                "-summary-heatmap-", sort.by,".png")
        img.files = c(img.files, file.name.sum)
        ta = hmakeTag("a", "Download pdf file", href=sub(".png", ".pdf", file.name.sum))
        pdfs = c(pdfs, ta)
        pdf.files = c(pdf.files, sub(".png", ".pdf", file.name.sum))
    }
    # Bar plot of the top gene sets, always last.
    file.name.bar = paste0("./",contrast.name,
            "-", gs.annot@label, "-bar-plot-",
            sort.by,".png")
    img.files = c(img.files, file.name.bar)
    pdf.files = c(pdf.files, sub(".png", ".pdf", file.name.bar))
    # Each cell: thumbnail linked to the full file, over caption and link.
    images = hmakeTag("a", hmakeTag("img", src=img.files, width=500),
            href=pdf.files)
    pdfs = c(pdfs, hmakeTag("a", "Download pdf file", href=sub(".png", ".pdf", file.name.bar)))
    # Captions are appended in the same order as img.files above.
    plot.titles = c("Summary plot based on gene set rank and size",
            "Summary plot based on regulation direction and significance")
    if (file.exists(paste0(path, mds.file)))
        plot.titles = c(plot.titles,
                "MDS plot for the gene set ranking in different base methods.")
    if (contr.num == 1) {
        fig.title = paste0("Summary heatmap of the top gene sets (",
                hmakeTag("a", "Interpret Results",
                        href=sub(".png", ".csv", file.name.sum)), ")")
        plot.titles = c(plot.titles, fig.title)
    }
    content = paste(images, plot.titles, pdfs, sep="<br/>")
    if (length(content) %% 2 != 0){
        content = c(content, "")# to make sure there are multiple of 2s
    }
    # Lay the cells out two per row.
    content = matrix(content, ncol=2, byrow = TRUE)
    # Re-derive directory and .html file name, then write the page.
    path = strsplit(file.name, "/")[[1]]
    file = path[length(path)]
    file = gsub(".txt", ".html", file)
    path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
    p = openPage(file, dirname=path, title=title)
    hwrite(title, heading=1, br=TRUE, page=p)
    hwrite(content, align="center", border=0, page=p)
    closePage(p)
}
# Build the HTML gallery of per-gene-set heatmaps for a contrast (or for
# the multi-contrast comparison), laid out five thumbnails per row, each
# linked to its full-size PDF and its underlying CSV.
#
# contrast.name: contrast label, used in the title and directory names.
# gsa.results:   result table; row names select/order the gene sets shown.
# gs.annot:      gene set annotation object; slot @anno provides IDs/names.
# file.name:     output .txt path; the page is written alongside as .html.
# comparison:    TRUE uses the flat ".multi" file layout of the comparison
#                analysis.
generateAllHeatmapsPage <- function(contrast.name, gsa.results, gs.annot,
        file.name, comparison=FALSE){
    title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (",
                    contrast.name, ")")
    # IDs drive the heatmap file names; names label the thumbnails.
    ids = gs.annot@anno[match(rownames(gsa.results), gs.annot@anno[,2]),
            "ID"]
    names = gs.annot@anno[match(rownames(gsa.results), gs.annot@anno[,2]),
            "GeneSet"]
    if(!comparison){
        # Per-contrast layout: files in a subdirectory named after the
        # contrast (" - " collapsed to "-").
        heatmaps.img = paste0("./", sub(" - ", "-", contrast.name),
                "/", ids, ".heatmap.png")
        heatmaps = paste0("./", sub(" - ", "-", contrast.name), "/",
                ids, ".heatmap.pdf")
        csvfiles = paste0("./", sub(" - ", "-", contrast.name), "/",
                ids, ".heatmap.csv")
    } else{
        # Comparison layout: flat directory, ".multi" suffix.
        heatmaps.img = paste0("./", ids, ".heatmap.multi.png")
        heatmaps = paste0("./", ids, ".heatmap.multi.pdf")
        csvfiles = paste0("./", ids, ".heatmap.multi.csv")
    }
    # Each cell: PNG thumbnail linked to the PDF, then ID, truncated set
    # name, and the PDF/CSV links.
    heatmaps.img = hmakeTag("a", hmakeTag("img", src=heatmaps.img,
                    width=300, center=TRUE), href=heatmaps)
    heatmaps = hmakeTag("a", "Large Map", href=heatmaps)
    csvfiles = hmakeTag("a", "Interpret Results", href=csvfiles)
    heatmaps = paste(heatmaps, csvfiles)
    heatmaps = paste(heatmaps.img, ids, substr(names, 1, 30), heatmaps,
            sep="<br />")
    # head(heatmaps)
    # Pad with empty cells so the gallery matrix has full rows of 5.
    if (length(heatmaps) %% 5 != 0)
        heatmaps = c(heatmaps, rep("", 5 - (length(heatmaps) %% 5 )))
    hm.table = matrix(heatmaps, ncol=5, byrow = TRUE)
    # Derive output directory and .html file name, then write the page.
    path = strsplit(file.name, "/")[[1]]
    file = path[length(path)]
    file = gsub(".txt", ".html", file)
    path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
    p = openPage(file, dirname=path, title=title)
    hwrite(title, heading=1, br=TRUE, page=p)
    hwrite(hm.table, col.width = 200, page=p)
    closePage(p)
}
# Build the HTML gallery of KEGG pathway (pathview) images for a contrast
# (or the multi-contrast comparison), five thumbnails per row, each linked
# to the full-size image and the matching heatmap CSV.
#
# contrast.name: contrast label, used in the title and directory names.
# gsa.results:   result table; row names select/order the gene sets shown.
# gs.annot:      gene set annotation object; slots @anno and @label used.
# file.name:     output .txt path; the page is written alongside as .html.
# comparison:    TRUE uses the flat ".multi" file layout of the comparison
#                analysis.
generateAllPathwaysPage <- function(contrast.name, gsa.results, gs.annot,
        file.name, comparison=FALSE){
    title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (",
                    contrast.name, ")")
    # IDs drive the pathview file names; names label the thumbnails.
    ids = gs.annot@anno[match(rownames(gsa.results), gs.annot@anno[,2]),
            "ID"]
    names = gs.annot@anno[match(rownames(gsa.results), gs.annot@anno[,2]),
            "GeneSet"]
    if (!comparison){
        # Per-contrast layout; the CSV lives under the sibling heatmap
        # directory. NOTE(review): the "../hm-top-gs-" literal below spans
        # a line break inside the string — presumably intentional in the
        # original source; verify the generated links.
        pathways.img = paste0("./", sub(" - ", "-", contrast.name),
                "/", ids, ".pathview.png")
        csvfiles = paste0("../hm-top-gs-",gs.annot@label, "/", sub(" -
", "-", contrast.name), "/", ids, ".heatmap.csv")
        pathways = paste0("./", sub(" - ", "-", contrast.name), "/",
                ids, ".pathview.png")
    } else{
        # Comparison layout: flat directory, ".multi" suffix.
        pathways.img = paste0("./", ids, ".pathview.multi.png")
        csvfiles = paste0("../hm-top-gs-",gs.annot@label, "/", ids,
                ".heatmap.multi.csv")
        pathways = paste0("./", ids, ".pathview.multi.png")
    }
    # Each cell: PNG thumbnail linked to the image, then ID, truncated set
    # name, and the image/CSV links.
    pathways.img = hmakeTag("a", hmakeTag("img", src=pathways.img,
                    width=300, center=TRUE), href=pathways)
    csvfiles = hmakeTag("a", "Interpret Results", href=csvfiles)
    pathways = hmakeTag("a", "Large Pathway", href=pathways)
    pathways = paste(pathways, csvfiles)
    pathways = paste(pathways.img,ids, substr(names, 1, 30), pathways,
            sep="<br />")
    # head(heatmaps)
    # Pad with empty cells so the gallery matrix has full rows of 5.
    if (length(pathways) %% 5 != 0)
        pathways = c(pathways, rep("", 5 - (length(pathways) %% 5 )))
    hm.table = matrix(pathways, ncol=5, byrow = TRUE)
    # Derive output directory and .html file name, then write the page.
    path = strsplit(file.name, "/")[[1]]
    file = path[length(path)]
    file = gsub(".txt", ".html", file)
    path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
    p = openPage(file, dirname=path, title=title)
    hwrite(title, heading=1, br=TRUE, page=p)
    hwrite(hm.table, col.width = 200, page=p)
    closePage(p)
}
# Build the top-level index.html of the EGSEA report: a header (with logo
# when available), the analysis parameters, and a nested link list to the
# per-contrast and comparison result pages (stats tables, heatmaps, KEGG
# pathways, GO graphs, summary plots, downloadable stats).
#
# sampleSize:    number of samples, reported in the parameters section.
# contr.names:   contrast labels.
# gs.annots:     list of gene set annotation objects; accessed with $
#                (label, name, version, date, featureIDs, species).
# baseInfo:      list describing each base GSEA method (version, package).
# combineMethod: p-value combination method name (display only).
# sort.by:       sorting statistic name (display only).
# report.dir:    directory the index.html is written into.
# logFC.cal:     how fold changes were calculated (display only).
# symbolsMap:    gene ID -> symbol map; only nrow() is checked for display.
# egsea.ver, egseadata.ver: package versions for the parameters section.
createEGSEAReport <- function(sampleSize, contr.names, gs.annots, baseInfo,
        combineMethod,
        sort.by, report.dir,
        logFC.cal, symbolsMap,
        egsea.ver,
        egseadata.ver){
    contr.num = length(contr.names)
    # Per-collection metadata vectors, aligned with gs.annots.
    gs.labels = sapply(gs.annots, function(x) x$label)
    gs.names = sapply(gs.annots, function(x) x$name)
    gs.versions = sapply(gs.annots, function(x) x$version)
    gs.dates = sapply(gs.annots, function(x) x$date)
    # Relative sub-directories of the report the links point into.
    ranked.gs.dir = "./ranked-gene-sets-base"
    pv.dir = paste0("./pv-top-gs-", gs.labels[grep("^kegg", gs.labels)],
            "/")
    hm.dir = paste0("./hm-top-gs-", gs.labels, "/")
    summary.dir = "./summary/"
    go.dir = "./go-graphs/"
    p = openPage("index.html", dirname=report.dir, title="Ensemble of Gene
Set Enrichment Analyses (EGSEA) - Report")
    # Header: logo + title when the packaged logo is available, plain
    # title otherwise.
    logo.file = system.file("logo", "EGSEA_logo.png", package="EGSEA")
    if (file.exists(logo.file)){
        file.copy(logo.file, report.dir)
        img = hmakeTag("img", src="EGSEA_logo.png", width="150",
                style="float:left;")
        title = hmakeTag("h1", "Gene Set Testing Report",
                style="color:#0f284f; position:relative; top:18px; left:10px;")
        tmp = hmakeTag("div", paste0(img, title))
        hwrite(tmp, div=TRUE, heading=1, br=TRUE, page=p)
    }else
        hwrite("EGSEA Gene Set Testing Report", style="color:#0f284f",
                heading=1, br=TRUE, page=p)
    hwrite("Analysis Parameters", style="color:#22519b",
            heading=2, page=p)
    #### write analysis parameters
    hwrite(paste0(hmakeTag("b", "Total number of genes: ") ,
                    length(gs.annots[[1]]$featureIDs)),
            br=TRUE, page=p)
    hwrite(paste0(hmakeTag("b", "Total number of samples: " ),
                    sampleSize), br=TRUE, page=p)
    hwrite(paste0(hmakeTag("b", "Number of contrasts: ") ,contr.num),
            br=TRUE, page=p)
    # Format each base method as "name (package:version)".
    base.names = names(baseInfo)
    base.vers = sapply(base.names, function(x) as.character(baseInfo[[x]]$version))
    base.pkgs = sapply(base.names, function(x) baseInfo[[x]]$package)
    baseMethods = paste0(base.names, " (", base.pkgs, ":", base.vers, ")")
    hwrite(paste0(hmakeTag("b","Base GSEA methods: ") ,paste(baseMethods,
                            collapse=", ")), br=TRUE, page=p)
    hwrite(paste0(hmakeTag("b","P-value combine method: " ),
                    combineMethod), br=TRUE, page=p)
    hwrite(paste0(hmakeTag("b","Sorting statistic: " ),sort.by), br=TRUE,
            page=p)
    hwrite(paste0(hmakeTag("b","Fold changes calculated: " ),logFC.cal),
            br=TRUE, page=p)
    hwrite(paste0(hmakeTag("b","Gene IDs - Symbols mapping used: " ),
                    ifelse(nrow(symbolsMap) > 0, "Yes",
                            "No")), br=TRUE, page=p)
    hwrite(paste0(hmakeTag("b","Organism: " ), gs.annots[[1]]$species),
            br=TRUE, page=p)
    gs.cols = paste0(gs.names, " (", gs.versions, ", ", gs.dates, ")")
    hwrite(paste0(hmakeTag("b","Gene set collections: " ), paste(gs.cols, collapse=", ")),
            br=TRUE, page=p)
    hwrite(paste0(hmakeTag("b","EGSEA version: " ), egsea.ver),
            br=TRUE, page=p)
    hwrite(paste0(hmakeTag("b","EGSEAdata version: " ), egseadata.ver),
            br=TRUE, page=p)
    hwrite(hmakeTag("br"), br=TRUE)
    hwrite("Analysis Results", style="color:#22519b",
            heading=2, page=p)
    main = ""
    ## Main Page Generation
    # One top-level list entry per contrast; within it, one line per gene
    # set collection with links to all of that collection's pages.
    for (i in 1:contr.num){
        file.name = paste0(ranked.gs.dir, "/ranked-", gs.labels,
                "-gene-sets-",
                sub(" - ", "-", contr.names[i]),
                '.html')
        temp = paste0(gs.names, " (", hmakeTag("a", "Stats Table",
                        href=file.name))
        file.name = paste0(hm.dir,
                sub(" - ", "-", contr.names[i]),
                '-allHeatmaps.html')
        temp = paste0(temp, ", ", hmakeTag("a", "Heatmaps" ,
                        href=file.name))
        # Pathways link only for KEGG collections.
        kegg.idx = grep("^kegg", gs.labels)
        if (length(kegg.idx) != 0){
            file.name = paste0(pv.dir,
                    sub(" - ", "-", contr.names[i]),
                    '-allPathways.html')
            temp[kegg.idx] = paste0(temp[kegg.idx], ", ",
                    hmakeTag("a", "Pathways" , href=file.name))
        }
        # GO graphs link only for GO-based collections (c5 / GeneSetDB GO).
        go.idx = c(grep("^c5", gs.labels), grep("^gsdbgo", gs.labels))
        if (length(go.idx) !=0 ){
            file.name = paste0(go.dir,
                    sub(" - ", "-", contr.names[i]),
                    "-", gs.labels[go.idx], '-allGOgraphs.html')
            temp[go.idx] = paste0(temp[go.idx], ", ", hmakeTag("a",
                            "GO Graphs" , href=file.name))
        }
        file.name = paste0(summary.dir, sub(" - ", "-",
                        contr.names[i]), "-", gs.labels,
                "-summary.html")
        temp = paste0(temp, ", ", hmakeTag("a", "Summary Plots",
                        href=file.name))
        file.name = paste0(ranked.gs.dir, "/ranked-", gs.labels,
                "-gene-sets-",
                sub(" - ", "-", contr.names[i]), '.txt')
        temp = paste0(temp, ", ", hmakeTag("a", "Download
Stats",target="_blank", href=file.name))
        temp = paste0(temp, ")")
        temp = hmakeTag("il", paste(hmakeTag("b",
                                contr.names[i]), hmakeTag("ul", paste(hmakeTag("li", temp),
                                        collapse="\n")), sep="\n"))
        main = paste(main, temp, sep="\n")
    }
    # Extra top-level entry for the comparison analysis when there is
    # more than one contrast; mirrors the per-contrast links using the
    # "-compare" / flat comparison file names.
    if (contr.num > 1){
        file.name = paste0(ranked.gs.dir, "/ranked-", gs.labels,
                "-gene-sets-compare.html")
        temp = paste0(gs.names, " (", hmakeTag("a", "Stats Table",
                        href=file.name))
        file.name = paste0(hm.dir, 'allHeatmaps.html')
        temp = paste0(temp, ", ", hmakeTag("a", "Heatmaps" ,
                        href=file.name))
        kegg.idx = grep("^kegg", gs.labels)
        if (length(kegg.idx) != 0){
            file.name = paste0(pv.dir, 'allPathways.html')
            temp[kegg.idx] = paste0(temp[kegg.idx], ", ",
                    hmakeTag("a", "Pathways" , href=file.name))
        }
        go.idx = c(grep("^c5", gs.labels), grep("^gsdbgo", gs.labels))
        if (length(go.idx) !=0 ){
            file.name = paste0(go.dir, gs.labels[go.idx], '-allGOgraphs.html')
            temp[go.idx] = paste0(temp[go.idx], ", ", hmakeTag("a",
                            "GO Graphs" , href=file.name))
        }
        file.name = paste0(summary.dir, gs.labels, "-summary.html")
        temp = paste0(temp, ", ", hmakeTag("a", "Summary Plots",
                        href=file.name))
        file.name = paste0(ranked.gs.dir, "/ranked-", gs.labels,
                "-gene-sets-compare.txt")
        temp = paste0(temp, ", ", hmakeTag("a", "Download Stats" ,
                        target="_blank", href=file.name))
        temp = paste0(temp, ")")
        temp = hmakeTag("il", paste(hmakeTag("b", "Comparison
Analysis"), hmakeTag("ul", paste(hmakeTag("li", temp), collapse="\n")),
                        sep="\n"))
        main = paste(main, temp, sep="\n")
    }
    hwrite(hmakeTag("ul", main, style="list-style-type:square"), page=p)
    hwrite(hmakeTag("footer", "Report generated by EGSEA package. For any
inquiry, please contact the authors."))
    closePage(p)
}
|
/R/htmlUtils.R
|
no_license
|
malhamdoosh/EGSEA
|
R
| false
| false
| 30,305
|
r
|
#Ensemble of Gene Set Enrichment Analyses
#
# Author: Monther Alhamdoosh, E:m.hamdoosh@gmail.com
###############################################################################
# Write one GSEA method's results for a contrast as both a sortable HTML
# table and a tab-delimited text file.
#
# contrast.name: contrast label, embedded in the page title.
# gsa.results:   matrix or data frame of per-gene-set statistics; row names
#                are gene set names.
# gs.annot:      gene set annotation object; slots @name, @label and @anno
#                (data frame with a "GeneSet" column) are used.
# method:        GSEA method name, embedded in the page title.
# file.name:     output .txt path; the .html file is written alongside it.
writeResultsToHTML <- function(contrast.name, gsa.results, gs.annot, method,
        file.name){
    title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using ", method, " (",
                    contrast.name, ")")
    # Split file.name into directory part and bare file name; the HTML
    # page replaces the .txt extension.
    path = strsplit(file.name, "/")[[1]]
    file = path[length(path)]
    file = gsub(".txt", ".html", file)
    path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
    if (!is.data.frame(gsa.results)){
        # Coerce a matrix to a data frame of numeric columns, keeping
        # row names intact.
        rows = row.names(gsa.results)
        gsa.results = data.frame(gsa.results)
        gsa.results = lapply(gsa.results, as.numeric)
        gsa.results = as.data.frame(gsa.results)
        row.names(gsa.results) = rows
    }
    # Align annotation rows with the result rows by gene set name.
    annot.frame = gs.annot@anno[match(rownames(gsa.results),
                    gs.annot@anno[,"GeneSet"]),]
    if ("BroadUrl" %in% colnames(annot.frame)){
        annot.frame[, "BroadUrl"] = hmakeTag("a", "Go to Broad",
                href=annot.frame[, "BroadUrl"])
    }
    if (length(grep("^gsdbgo", gs.annot@label)) == 1){
        # GeneSetDB GO collections link out to AmiGO via their GO ID.
        annot.frame[, "SourceDB"] = "Go to AmiGO"
        annot.frame[, "SourceURL"] = paste0("http://amigo.geneontology.org/amigo/term/",
                annot.frame[, "GOID"])
    }
    if ("SourceURL" %in% colnames(annot.frame)){
        # Turn SourceDB into a hyperlink, then drop the raw URL column.
        annot.frame[, "SourceDB"] = hmakeTag("a", annot.frame[,
                        "SourceDB"], href=annot.frame[, "SourceURL"])
        annot.frame = annot.frame[, "SourceURL" !=
                        colnames(annot.frame)]
    }
    table.data = data.frame(annot.frame, gsa.results)
    if ("Rank" %in% colnames(table.data) ){
        table.data[, "Rank"] = as.numeric(table.data[, "Rank"])
    }
    # p-values rendered as character to keep full precision in the HTML.
    if ("p.value" %in% colnames(table.data)){
        table.data[, "p.value"] = as.character(table.data[, "p.value"])
    }
    if ("p.adj" %in% colnames(table.data)){
        table.data[, "p.adj"] = as.character(table.data[, "p.adj"])
    }
    capture.output(HTMLsortedTable(table.data, title, title, file=file, path=path))
    # Rebuild the annotation frame without the injected HTML so the text
    # output stays machine-readable.
    annot.frame = gs.annot@anno[match(rownames(gsa.results),
                    gs.annot@anno[,"GeneSet"]),]
    table.data = data.frame(Rank=as.numeric(seq(1, nrow(gsa.results))),
            annot.frame,
            gsa.results)
    write.table(table.data,
            file=file.name,
            sep="\t",
            quote=FALSE,
            row.names=FALSE)
}
# Write the EGSEA results for one contrast to two outputs:
#   1. a tab-delimited text file at `file.name` (the raw stats table), and
#   2. a sortable HTML table (same path, ".txt" swapped for ".html"),
#      either a static HTMLsortedTable or an interactive DT::datatable.
# Each gene set row is decorated with links to its heatmap/pathway files.
#
# Args:
#   contrast.name: display label of the contrast (" - " is replaced by "-"
#                  when used in file paths).
#   gsa.results:   matrix or data.frame of per-gene-set statistics, with
#                  gene set names as row names.
#   gs.annot:      annotation object with slots @name, @label and @anno
#                  (a table with at least "ID" and "GeneSet" columns).
#   file.name:     "/"-separated path of the output .txt file.
#   comparison:    if TRUE, link to the multi-contrast heatmap/pathway files.
#   interactive:   if TRUE, render the HTML table as a DT widget.
writeEGSEAResultsToHTML <- function(contrast.name, gsa.results, gs.annot,
file.name, comparison=FALSE, interactive=TRUE){
# NOTE: this title string literal contains an embedded newline; harmless
# in HTML output where it renders as a space.
title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (",
contrast.name, ")")
# Split file.name into directory part ("path") and file part ("file"),
# and derive the HTML file name from the .txt name.
# NOTE(review): gsub(".txt", ...) is an unanchored regex where "." matches
# any character; a name containing ".txt" mid-string would also be changed.
path = strsplit(file.name, "/")[[1]]
file = path[length(path)]
file = gsub(".txt", ".html", file)
# NOTE(review): `path[1:length(path) -1]` parses as `(1:length(path)) - 1`,
# i.e. indices 0:(n-1); the 0 index is silently dropped, so this happens
# to equal path[1:(length(path)-1)].
path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
# Coerce a matrix result into a numeric data.frame, preserving row names.
if (!is.data.frame(gsa.results)){
rows = row.names(gsa.results)
gsa.results = data.frame(gsa.results)
gsa.results = lapply(gsa.results, as.numeric)
gsa.results = as.data.frame(gsa.results)
row.names(gsa.results) = rows
}
# Look up gene set IDs via the second annotation column (gene set name).
ids = gs.annot@anno[match(rownames(gsa.results), gs.annot@anno[,2]),
"ID"]
# Build relative links to the per-set heatmap PDFs and CSVs.
if (!comparison){
heatmaps = paste0("../hm-top-gs-", gs.annot@label, "/",
sub(" - ", "-", contrast.name), "/", ids,
".heatmap.pdf")
csvfiles = paste0("../hm-top-gs-", gs.annot@label, "/",
sub(" - ", "-", contrast.name), "/", ids,
".heatmap.csv")
}
else{
heatmaps = paste0("../hm-top-gs-", gs.annot@label, "/", ids,
".heatmap.multi.pdf")
csvfiles = paste0("../hm-top-gs-", gs.annot@label, "/", ids,
".heatmap.multi.csv")
}
heatmaps = hmakeTag("a", "Show Map", href=heatmaps)
csvfiles = hmakeTag("a", "Interpret Results", href=csvfiles)
heatmaps = paste(heatmaps, csvfiles)
# Non-KEGG collections: table = Rank + heatmap links + annotation + stats.
# The Heatmaps column is dropped before writing the plain-text table.
if (length(grep("^kegg", gs.annot@label)) == 0){
table.data = data.frame(Rank=as.numeric(seq(1,
nrow(gsa.results))),
Heatmaps=heatmaps,
gs.annot@anno[match(rownames(gsa.results),
gs.annot@anno[,"GeneSet"]),],
gsa.results)
write.table(table.data[, -c(which(colnames(table.data) ==
"Heatmaps"))],
file=file.name,
sep="\t",
quote=FALSE,
row.names=FALSE)
}
else{
# KEGG collections additionally get a Pathways link column
# (pathview PNGs); both link columns are dropped from the text file.
if (!comparison){
pathways = paste0("../pv-top-gs-", gs.annot@label, "/",
sub(" - ", "-", contrast.name), "/",
ids, ".pathview.png")
} else{
pathways = paste0("../pv-top-gs-", gs.annot@label, "/",
ids, ".pathview.multi.png")
}
pathways = hmakeTag("a", "Show Pathway", href=pathways)
pathways = paste(pathways, csvfiles)
table.data = data.frame(Rank=as.numeric(seq(1,
nrow(gsa.results))),
Heatmaps=heatmaps,
Pathways=pathways,
gs.annot@anno[match(rownames(gsa.results),
gs.annot@anno[,"GeneSet"]),],
gsa.results)
write.table(table.data[, -c(which(colnames(table.data) ==
"Heatmaps"), which(colnames(table.data) == "Pathways"))],
file=file.name,
sep="\t",
quote=FALSE,
row.names=FALSE)
}
# Round numeric display columns: 2 dp for ordinary statistics, 6 dp for
# p-value-like columns, but only in the static (non-interactive) table.
for (i in 1:ncol(table.data)){
if (is.numeric(table.data[, i])){
if (!anyOccur(colnames(table.data)[i], c("p.value",
"p.adj", "min.pvalue")))
table.data[, i] = round(table.data[,i], 2)
else if (! interactive)
table.data[, i] = round(table.data[,i], 6)
}
}
# Turn Broad Institute URLs into links that open in a new tab.
if ("BroadUrl" %in% colnames(table.data)){
table.data[, "BroadUrl"] = hmakeTag("a", "Go to Broad",
href=table.data[, "BroadUrl"],
'target'='_blank')
}
# GeneSetDB GO collections: synthesize AmiGO links from the GOID column.
if (length(grep("^gsdbgo", gs.annot@label)) == 1){
table.data[, "SourceDB"] = "Go to AmiGO"
table.data[, "SourceURL"] = paste0("http://amigo.geneontology.org/amigo/term/",
table.data[, "GOID"])
}
# Fold SourceURL into a link on SourceDB, then drop the URL column.
if ("SourceURL" %in% colnames(table.data)){
table.data[, "SourceDB"] = hmakeTag("a", table.data[,
"SourceDB"], href=table.data[, "SourceURL"])
table.data = table.data[, "SourceURL" != colnames(table.data)]
}
if ("GeneSet" %in% colnames(table.data)){
table.data[, "GeneSet"] = gsub("_", " ", table.data[,
"GeneSet"])
}
# Truncate long descriptions to 50 chars, keeping the full text as a
# hover tooltip via a <span title=...>.
if ("Description" %in% colnames(table.data)){
desc = c()
for ( t in table.data[, "Description"]){
l = stri_length(t)
if (is.na(l) || l <= 50)
desc = c(desc, t)
else
desc = c(desc, paste0("<span title='", t,"'>",
substr(t, 1, 50), " ... </span>"))
}
table.data[, "Description"] = desc
}
# Map the numeric direction score to a human-readable label.
if ("direction" %in% colnames(table.data)){
table.data[, "direction"] =
as.character(lapply(as.numeric(table.data[, "direction"]),
function(x) if (x > 0) "Up" else if (x < 0)
"Down" else "Neutral"))
}
# Normalize column types for display/sorting in the HTML table.
if ("Rank" %in% colnames(table.data) ){
table.data[, "Rank"] = as.numeric(table.data[, "Rank"])
}
if ("p.value" %in% colnames(table.data)){
table.data[, "p.value"] = as.character(table.data[, "p.value"])
}
if ("p.adj" %in% colnames(table.data)){
table.data[, "p.adj"] = as.character(table.data[, "p.adj"])
}
# NOTE(review): this call duplicates the one in the `else` branch below;
# when interactive=TRUE the static page written here is immediately
# overwritten by saveWidget() at the same path — confirm whether this
# unconditional call is intentional or leftover.
capture.output(HTMLsortedTable(table.data, title, title, file=file, path=path))
if(interactive){
saveWidget(widget=datatable(table.data,escape = FALSE),
selfcontained=FALSE, libdir=file.path(path,"lib"),
file=file.path(path,file)) # DT::datatable
}else{
capture.output(HTMLsortedTable(table.data, title, title, file=file, path=path))
}
}
# Test whether any regular-expression pattern in `list` matches `string`.
#
# Args:
#   string: character vector to search (typically a single column name;
#           a vector is allowed — a match in any element counts).
#   list:   character vector of regular-expression patterns. Note the
#           patterns are regexes, so e.g. "p.value" also matches
#           "min.pvalue" (the "." matches any character), which the
#           callers in this file rely on.
#
# Returns:
#   TRUE if at least one pattern matches at least one element of
#   `string`; FALSE otherwise (including when `list` is empty).
anyOccur <- function(string, list){
    # vapply + any() replaces the original grep() loop with an
    # equivalent vectorized, type-safe check.
    any(vapply(list,
               function(pattern) any(grepl(pattern, string)),
               logical(1)))
}
# Build the comparison-analysis summary HTML page for one gene set
# collection: a 2-column grid of summary plot PNGs (rank/size and
# direction/significance), pairwise plots with in-page anchors when more
# than two contrasts are compared, a summary heatmap, and a bar plot.
#
# Args:
#   contrast.names: labels of the contrasts being compared.
#   gs.annot:       annotation object with slots @name and @label.
#   sum.plot.axis:  axis tag used in the summary plot file names.
#   sort.by:        statistic tag used in heatmap/bar plot file names.
#   file.name:      "/"-separated .txt path; the page is written next to
#                   it with the extension swapped to .html.
#   interactive:    if TRUE, link to interactive HTML plots instead of PDFs.
#
# Despite the dot in its name, this is a plain function, not an S3 method.
generateSummaryPage.comparison <- function(contrast.names, gs.annot,
sum.plot.axis, sort.by, file.name, interactive=TRUE){
# Derive output directory and .html file name from file.name.
path = strsplit(file.name, "/")[[1]]
file = path[length(path)]
file = gsub(".txt", ".html", file)
# NOTE(review): `1:length(path) -1` is `(1:length(path)) - 1`; the 0
# index is dropped, so this equals path[1:(length(path)-1)].
path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
# Title string contains an embedded newline; renders as a space in HTML.
title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (Comparison Analysis)")
if (length(contrast.names) == 2 ){
# Exactly two contrasts: one rank plot and one direction plot.
img.files = paste0("./", gs.annot@label, "-summary-",
sum.plot.axis, c("-rank", "-dir"),".png")
plot.titles = c("Summary plot based on gene set rank and size",
"Summary plot based on regulation direction and significance")
}
else if (length(contrast.names) > 2){
# More than two contrasts: one rank+direction plot pair per
# contrast pair (i,j), plus a contrast-by-contrast table of
# "View" links jumping to the matching in-page anchor.
img.files = c()
plot.titles = c()
anchor.names = c()
anchors.table = matrix("-", length(contrast.names),
length(contrast.names))
colnames(anchors.table) = contrast.names
rownames(anchors.table) = contrast.names
for (i in 1:(length(contrast.names)-1)){
for (j in (i+1):length(contrast.names)){
# Two identical anchor names per pair: one for each of
# the pair's two plots.
anchor.names = c(anchor.names, c(paste0('anch',
i,j),paste0('anch', i,j)))
anchors.table[i, j] = hmakeTag("a", "View",
style="text-decoration: none",
href=paste0("#", 'anch', i,j))
anchors.table[j, i] = hmakeTag("a", "View",
style="text-decoration: none",
href=paste0("#", 'anch', i,j))
img.files = c(img.files, paste0("./",
gs.annot@label, paste0('-',
i,j), "-summary-",
sum.plot.axis, c("-rank", "-dir"),
".png"))
# NOTE: these title literals contain embedded newlines
# (harmless in HTML output).
titles = c("Summary plot based on gene set rank
and size <br/>",
"Summary plot based on
regulation direction and significance <br/>")
titles = paste0(titles, contrast.names[i]," |
", contrast.names[j])
plot.titles = c(plot.titles, titles)
}
}
}
else{
# Fewer than two contrasts: no pairwise summary plots.
img.files = c()
plot.titles = c()
}
# add summary heatmaps if more than 2 contrasts to the comparison analysis page
if (length(contrast.names) >= 2){
file.name = paste0("./",gs.annot@label,
"-summary-heatmap-", sort.by,".png")
img.files = c(img.files, file.name)
fig.title = paste0("Summary heatmap of the top gene sets (",
hmakeTag("a", "Interpret Results",
href=sub(".png", ".csv", file.name)), ")")
plot.titles = c(plot.titles, fig.title)
}
if(interactive){
html.img.files = sub(".png", ".html", img.files)
pdfs = hmakeTag("a", "Open interactive plot", href=html.img.files)
# actually they are html files. but for convention we still call it pdf
pdf.files = html.img.files
}else{
pdf.img.files = sub(".png", ".pdf", img.files)
pdfs = hmakeTag("a", "Download pdf file", href=pdf.img.files)
pdf.files = pdf.img.files
}
# The bar plot of top gene sets is always appended last.
file.name.bar = paste0("./",
"comparison-", gs.annot@label, "-bar-plot-",
sort.by ,".png")
img.files = c(img.files, file.name.bar)
plot.titles = c(plot.titles, "Bar plot of the top gene sets")
pdf.files = c(pdf.files, sub(".png", ".pdf", file.name.bar))
images = hmakeTag("a", hmakeTag("img", src=img.files, width=500),
href=pdf.files)
# NOTE(review): `pdfs` has one fewer element than `images`/`plot.titles`
# here (the bar plot link was appended to pdf.files but not to pdfs), so
# paste() recycles pdfs — confirm this recycling is intended.
content = paste(images, plot.titles, pdfs, sep="<br/>")
if (length(contrast.names) > 2){
# Prepend invisible anchors so the "View" table links can jump here.
anchors = hmakeTag("a", "", name=anchor.names)
content = paste(anchors, content, sep="")
}
if (length(content) %% 2 != 0){
content = c(content, "")# to make sure there are multiple of 2s
}
content = matrix(content, ncol=2, byrow = TRUE)
p = openPage(file, dirname=path, title=title)
hwrite(title, heading=1, br=TRUE, page=p)
if (length(contrast.names) > 2){
hwrite(anchors.table, align="center", border=1, width="1000px",
cellspacing=0, row.bgcolor='#ffffaa',
row.style=list('font-weight:bold'),
col.bgcolor='#ffffaa',
col.style=list('font-weight:bold'),
page=p)
}
hwrite(content, align="center", border=0, page=p)
closePage(p)
}
# Build the comparison-analysis GO graphs HTML page: a 2-column grid of
# the top-GO-term graphs (BP/MF/CC) that were actually generated on disk,
# each linking to its PDF version. If none of the three PNGs exist, a
# placeholder message is written instead.
#
# Args:
#   contrast.names: contrast labels (used only indirectly via file.name).
#   gs.annot:       annotation object with slots @name and @label.
#   sort.by:        statistic tag used in the graph file names.
#   file.name:      "/"-separated .txt path; the page is written next to
#                   it with the extension swapped to .html.
#
# Despite the dot in its name, this is a plain function, not an S3 method.
generateAllGOgraphsPage.comparison <- function(contrast.names, gs.annot,
sort.by, file.name){
path = strsplit(file.name, "/")[[1]]
file = path[length(path)]
file = gsub(".txt", ".html", file)
# NOTE(review): `1:length(path) -1` equals (1:length(path)) - 1; the 0
# index is dropped, so this works out to path[1:(length(path)-1)].
path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
# Title literal contains an embedded newline (renders as a space in HTML).
title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (Comparison Analysis)")
# Probe which of the three GO-category graphs exist on disk.
check.files = file.exists(paste0(path,
"comparison-", gs.annot@label, "-top-",
sort.by, "-", c("BP", "MF", "CC"), ".png"))
if (sum(check.files) == 0 )
content = hmakeTag("h3", "GO graphs could not be generated!")
else{
# Keep only the images/titles whose PNG file exists.
img.files = paste0("./comparison-", gs.annot@label, "-top-",
sort.by, "-", c("BP", "MF", "CC"), ".png")[check.files]
plot.titles =c("Top GO Biological Processes ", "Top GO Molecular Functions",
"Top GO Cellular Components ")[check.files]
pdf.files = sub(".png", ".pdf", img.files)
images = hmakeTag("a", hmakeTag("img", src=img.files, width=500),
href=pdf.files)
pdfs = hmakeTag("a", "Download pdf file", href=pdf.files)
content = paste(images, plot.titles, pdfs, sep="<br/>")
if (length(content) %% 2 != 0){
content = c(content, "")# to make sure there are multiple of 2s
}
content = matrix(content, ncol=2, byrow = TRUE)
}
p = openPage(file, dirname=path, title=title)
hwrite(title, heading=1, br=TRUE, page=p)
hwrite(content, align="center", border=0, page=p)
closePage(p)
}
# Build the per-contrast GO graphs HTML page: a 2-column grid of the
# top-GO-term graphs (BP/MF/CC) found on disk for this contrast, each
# linking to its PDF. If none exist, a placeholder message is written.
#
# Args:
#   contrast.name: contrast label; " - " is replaced with "-" in paths.
#   gs.annot:      annotation object with slots @name and @label.
#   sort.by:       statistic tag used in the graph file names.
#   file.name:     "/"-separated .txt path; the page is written next to
#                  it with the extension swapped to .html.
generateAllGOgraphsPage <- function(contrast.name, gs.annot, sort.by,
file.name){
# Title literal contains an embedded newline (renders as a space in HTML).
title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (",
contrast.name, ")")
path = strsplit(file.name, "/")[[1]]
file = path[length(path)]
file = gsub(".txt", ".html", file)
# Unlike its siblings, this function normalizePath()-es the directory.
# NOTE(review): `1:length(path) -1` equals (1:length(path)) - 1; the 0
# index is dropped, so this works out to path[1:(length(path)-1)].
path = paste0(normalizePath(paste(path[1:length(path) -1], collapse =
"/")), "/")
tmp = c("BP", "MF", "CC")
img.files = c()
plot.titles = c()
# NOTE: this literal contains an embedded newline (display-only text).
cat.names = c("Top GO Biological Process.", "Top GO Molecular
Functions.",
"Top GO Cellular Components.")
names(cat.names) = tmp
# Collect only the GO-category graphs that were actually generated.
for (cat in tmp){
f= paste0(sub(" - ", "-", contrast.name), "-", gs.annot@label,
"-top-", sort.by, "-", cat, ".png")
if (file.exists(paste0(path, f))){
img.files = c(img.files, f)
plot.titles = c(plot.titles, cat.names[cat])
}
}
if (length(img.files) > 0){
img.files = paste0("./", img.files)
pdf.files = sub(".png", ".pdf", img.files)
images = hmakeTag("a", hmakeTag("img", src=img.files,
width=600), href=pdf.files)
pdfs = hmakeTag("a", "Download pdf file", href=pdf.files)
content = paste(images, plot.titles, pdfs, sep="<br/>")
if (length(content) %% 2 != 0){
content = c(content, "")# to make sure there are
# multiple of 2s
}
content = matrix(content, ncol=2, byrow = TRUE)
}
else
content = hmakeTag("h3", "GO graphs could not be generated!")
p = openPage(file, dirname=path, title=title)
hwrite(title, heading=1, br=TRUE, page=p)
hwrite(content, align="center", border=0, page=p)
closePage(p)
}
# Build the per-contrast summary HTML page for one gene set collection:
# rank/size and direction/significance summary plots, an optional MDS
# plot of base-method rankings (if its PNG exists), an optional summary
# heatmap (single-contrast analyses only), and a bar plot — laid out as
# a 2-column grid of linked thumbnails.
#
# Args:
#   contrast.name: contrast label; " - " is replaced with "-" in most
#                  generated file names.
#   gs.annot:      annotation object with slots @name and @label.
#   sum.plot.axis: axis tag used in summary plot file names.
#   sort.by:       statistic tag used in heatmap/bar plot file names.
#   contr.num:     total number of contrasts in the analysis; the summary
#                  heatmap is only included here when it equals 1.
#   file.name:     "/"-separated .txt path; the page is written next to
#                  it with the extension swapped to .html.
#   interactive:   if TRUE, link to interactive HTML plots instead of PDFs.
generateSummaryPage <- function(contrast.name, gs.annot, sum.plot.axis,
sort.by, contr.num, file.name, interactive=TRUE){
# Title literal contains an embedded newline (renders as a space in HTML).
title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (",
contrast.name, ")")
img.files = paste0("./", sub(" - ", "-", contrast.name), "-",
gs.annot@label, "-summary-", sum.plot.axis ,"-rank.png")
img.files = c(img.files, paste0("./", sub(" - ", "-", contrast.name),
"-", gs.annot@label, "-summary-", sum.plot.axis,"-dir.png"))
path = strsplit(file.name, "/")[[1]]
# NOTE(review): `1:length(path) -1` equals (1:length(path)) - 1; the 0
# index is dropped, so this works out to path[1:(length(path)-1)].
path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
mds.file = paste0(sub(" - ", "-", contrast.name), "-", gs.annot@label,
"-methods.png")
if(interactive){
# "pdf.files" actually hold .html targets in interactive mode; the
# name is kept for symmetry with the non-interactive branch.
pdf.files = sub(".png", ".html", img.files)
pdfs = hmakeTag("a", "Open interactive plot", href=pdf.files)
}else{
pdf.files = sub(".png", ".pdf", img.files)
pdfs = hmakeTag("a", "Download pdf file", href=pdf.files)
}
# Include the base-method MDS plot only if it was generated.
if (file.exists(paste0(path, mds.file))){
img.files = c(img.files, paste0("./", mds.file))
ta = hmakeTag("a", "Download pdf file", href=sub(".png", ".pdf", paste0("./", mds.file)))
pdfs = c(pdfs, ta)
pdf.files = c(pdf.files, sub(".png", ".pdf", paste0("./", mds.file)))
}
# add summary heatmap to the contrast summary page if no. contrasts = 1
if (contr.num == 1){
file.name.sum = paste0("./", gs.annot@label,
"-summary-heatmap-", sort.by,".png")
img.files = c(img.files, file.name.sum)
ta = hmakeTag("a", "Download pdf file", href=sub(".png", ".pdf", file.name.sum))
pdfs = c(pdfs, ta)
pdf.files = c(pdf.files, sub(".png", ".pdf", file.name.sum))
}
# NOTE(review): unlike the paths above, the bar plot name uses the raw
# contrast.name (no " - " -> "-" substitution) — confirm this matches
# the file name used when the bar plot is written.
file.name.bar = paste0("./",contrast.name,
"-", gs.annot@label, "-bar-plot-",
sort.by,".png")
img.files = c(img.files, file.name.bar)
pdf.files = c(pdf.files, sub(".png", ".pdf", file.name.bar))
images = hmakeTag("a", hmakeTag("img", src=img.files, width=500),
href=pdf.files)
pdfs = c(pdfs, hmakeTag("a", "Download pdf file", href=sub(".png", ".pdf", file.name.bar)))
plot.titles = c("Summary plot based on gene set rank and size",
"Summary plot based on regulation direction and significance")
if (file.exists(paste0(path, mds.file)))
plot.titles = c(plot.titles,
"MDS plot for the gene set ranking in different base methods.")
if (contr.num == 1) {
fig.title = paste0("Summary heatmap of the top gene sets (",
hmakeTag("a", "Interpret Results",
href=sub(".png", ".csv", file.name.sum)), ")")
plot.titles = c(plot.titles, fig.title)
}
# NOTE(review): the bar plot has an image and a pdf link but no entry in
# plot.titles, so paste() recycles the shorter vectors here.
content = paste(images, plot.titles, pdfs, sep="<br/>")
if (length(content) %% 2 != 0){
content = c(content, "")# to make sure there are multiple of 2s
}
content = matrix(content, ncol=2, byrow = TRUE)
# Re-derive file/path (path was overwritten above) and write the page.
path = strsplit(file.name, "/")[[1]]
file = path[length(path)]
file = gsub(".txt", ".html", file)
path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
p = openPage(file, dirname=path, title=title)
hwrite(title, heading=1, br=TRUE, page=p)
hwrite(content, align="center", border=0, page=p)
closePage(p)
}
# Build an HTML gallery page of all gene set heatmaps for one contrast
# (or the multi-contrast comparison): a 5-column table where each cell
# shows the heatmap thumbnail (linked to its PDF), the gene set ID, a
# truncated gene set name, and "Large Map" / "Interpret Results" links.
#
# Args:
#   contrast.name: contrast label; " - " is replaced with "-" in paths.
#   gsa.results:   results table whose row names are gene set names.
#   gs.annot:      annotation object with slots @name, @label and @anno
#                  (columns include "ID" and "GeneSet"; column 2 holds
#                  the gene set name used for matching).
#   file.name:     "/"-separated .txt path; the page is written next to
#                  it with the extension swapped to .html.
#   comparison:    if TRUE, use the multi-contrast heatmap file names.
generateAllHeatmapsPage <- function(contrast.name, gsa.results, gs.annot,
file.name, comparison=FALSE){
# Title literal contains an embedded newline (renders as a space in HTML).
title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (",
contrast.name, ")")
ids = gs.annot@anno[match(rownames(gsa.results), gs.annot@anno[,2]),
"ID"]
# NOTE: `names` here shadows base::names within this function.
names = gs.annot@anno[match(rownames(gsa.results), gs.annot@anno[,2]),
"GeneSet"]
if(!comparison){
heatmaps.img = paste0("./", sub(" - ", "-", contrast.name),
"/", ids, ".heatmap.png")
heatmaps = paste0("./", sub(" - ", "-", contrast.name), "/",
ids, ".heatmap.pdf")
csvfiles = paste0("./", sub(" - ", "-", contrast.name), "/",
ids, ".heatmap.csv")
} else{
heatmaps.img = paste0("./", ids, ".heatmap.multi.png")
heatmaps = paste0("./", ids, ".heatmap.multi.pdf")
csvfiles = paste0("./", ids, ".heatmap.multi.csv")
}
# Assemble each gallery cell: thumbnail link, ID, name (first 30 chars),
# and the two text links.
heatmaps.img = hmakeTag("a", hmakeTag("img", src=heatmaps.img,
width=300, center=TRUE), href=heatmaps)
heatmaps = hmakeTag("a", "Large Map", href=heatmaps)
csvfiles = hmakeTag("a", "Interpret Results", href=csvfiles)
heatmaps = paste(heatmaps, csvfiles)
heatmaps = paste(heatmaps.img, ids, substr(names, 1, 30), heatmaps,
sep="<br />")
# head(heatmaps)
# Pad with empty cells so the gallery reshapes into full rows of 5.
if (length(heatmaps) %% 5 != 0)
heatmaps = c(heatmaps, rep("", 5 - (length(heatmaps) %% 5 )))
hm.table = matrix(heatmaps, ncol=5, byrow = TRUE)
path = strsplit(file.name, "/")[[1]]
file = path[length(path)]
file = gsub(".txt", ".html", file)
# NOTE(review): `1:length(path) -1` equals (1:length(path)) - 1; the 0
# index is dropped, so this works out to path[1:(length(path)-1)].
path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
p = openPage(file, dirname=path, title=title)
hwrite(title, heading=1, br=TRUE, page=p)
hwrite(hm.table, col.width = 200, page=p)
closePage(p)
}
# Build an HTML gallery page of all KEGG pathway (pathview) maps for one
# contrast (or the comparison analysis): a 5-column table where each cell
# shows the pathway thumbnail (linked to the full-size PNG), the gene set
# ID, a truncated name, and "Large Pathway" / "Interpret Results" links.
#
# Args:
#   contrast.name: contrast label; " - " is replaced with "-" in paths.
#   gsa.results:   results table whose row names are gene set names.
#   gs.annot:      annotation object with slots @name, @label and @anno
#                  (columns include "ID" and "GeneSet"; column 2 holds
#                  the gene set name used for matching).
#   file.name:     "/"-separated .txt path; the page is written next to
#                  it with the extension swapped to .html.
#   comparison:    if TRUE, use the multi-contrast pathview file names.
generateAllPathwaysPage <- function(contrast.name, gsa.results, gs.annot,
file.name, comparison=FALSE){
# Title literal contains an embedded newline (renders as a space in HTML).
title = paste0("Gene Set Enrichment Analysis on ", gs.annot@name, "
using EGSEA (",
contrast.name, ")")
ids = gs.annot@anno[match(rownames(gsa.results), gs.annot@anno[,2]),
"ID"]
# NOTE: `names` here shadows base::names within this function.
names = gs.annot@anno[match(rownames(gsa.results), gs.annot@anno[,2]),
"GeneSet"]
if (!comparison){
pathways.img = paste0("./", sub(" - ", "-", contrast.name),
"/", ids, ".pathview.png")
# NOTE(review): the sub() pattern below is a string literal broken
# across lines, i.e. " -<newline>" rather than the " - " used
# everywhere else — this looks like line-wrap corruption and would
# never match; confirm against the original source.
csvfiles = paste0("../hm-top-gs-",gs.annot@label, "/", sub(" -
", "-", contrast.name), "/", ids, ".heatmap.csv")
pathways = paste0("./", sub(" - ", "-", contrast.name), "/",
ids, ".pathview.png")
} else{
pathways.img = paste0("./", ids, ".pathview.multi.png")
csvfiles = paste0("../hm-top-gs-",gs.annot@label, "/", ids,
".heatmap.multi.csv")
pathways = paste0("./", ids, ".pathview.multi.png")
}
# Assemble each gallery cell: thumbnail link, ID, name (first 30 chars),
# and the two text links.
pathways.img = hmakeTag("a", hmakeTag("img", src=pathways.img,
width=300, center=TRUE), href=pathways)
csvfiles = hmakeTag("a", "Interpret Results", href=csvfiles)
pathways = hmakeTag("a", "Large Pathway", href=pathways)
pathways = paste(pathways, csvfiles)
pathways = paste(pathways.img,ids, substr(names, 1, 30), pathways,
sep="<br />")
# head(heatmaps)
# Pad with empty cells so the gallery reshapes into full rows of 5.
if (length(pathways) %% 5 != 0)
pathways = c(pathways, rep("", 5 - (length(pathways) %% 5 )))
hm.table = matrix(pathways, ncol=5, byrow = TRUE)
path = strsplit(file.name, "/")[[1]]
file = path[length(path)]
file = gsub(".txt", ".html", file)
# NOTE(review): `1:length(path) -1` equals (1:length(path)) - 1; the 0
# index is dropped, so this works out to path[1:(length(path)-1)].
path = paste0(paste(path[1:length(path) -1], collapse = "/"), "/")
p = openPage(file, dirname=path, title=title)
hwrite(title, heading=1, br=TRUE, page=p)
hwrite(hm.table, col.width = 200, page=p)
closePage(p)
}
# Generate the top-level index.html of the EGSEA report: a header with
# the EGSEA logo (if installed), an "Analysis Parameters" section listing
# the run configuration, and an "Analysis Results" section with, per
# contrast (plus a Comparison Analysis entry when there is more than one
# contrast), links into the per-collection sub-pages (stats table,
# heatmaps, pathways, GO graphs, summary plots, raw stats download).
#
# Args:
#   sampleSize:    number of samples in the analysis.
#   contr.names:   character vector of contrast labels.
#   gs.annots:     list of gene set annotation objects; accessed here
#                  with $ (label, name, version, date, featureIDs,
#                  species).
#   baseInfo:      named list describing the base GSEA methods; each
#                  element carries $version and $package.
#   combineMethod: p-value combination method name (display only).
#   sort.by:       sorting statistic name (display only).
#   report.dir:    directory in which index.html is written.
#   logFC.cal:     description of how fold changes were calculated.
#   symbolsMap:    data.frame; a non-empty one means an ID-symbol
#                  mapping was used.
#   egsea.ver, egseadata.ver: package version strings for the footer
#                  parameter list.
createEGSEAReport <- function(sampleSize, contr.names, gs.annots, baseInfo,
combineMethod,
sort.by, report.dir,
logFC.cal, symbolsMap,
egsea.ver,
egseadata.ver){
contr.num = length(contr.names)
# Pull per-collection metadata out of the annotation list.
gs.labels = sapply(gs.annots, function(x) x$label)
gs.names = sapply(gs.annots, function(x) x$name)
gs.versions = sapply(gs.annots, function(x) x$version)
gs.dates = sapply(gs.annots, function(x) x$date)
# Relative sub-directories of the report tree linked from the index.
ranked.gs.dir = "./ranked-gene-sets-base"
pv.dir = paste0("./pv-top-gs-", gs.labels[grep("^kegg", gs.labels)],
"/")
hm.dir = paste0("./hm-top-gs-", gs.labels, "/")
summary.dir = "./summary/"
go.dir = "./go-graphs/"
# NOTE: the page title literal contains an embedded newline.
p = openPage("index.html", dirname=report.dir, title="Ensemble of Gene
Set Enrichment Analyses (EGSEA) - Report")
# Header: logo + styled title if the packaged logo exists, plain
# heading otherwise.
logo.file = system.file("logo", "EGSEA_logo.png", package="EGSEA")
if (file.exists(logo.file)){
file.copy(logo.file, report.dir)
img = hmakeTag("img", src="EGSEA_logo.png", width="150",
style="float:left;")
title = hmakeTag("h1", "Gene Set Testing Report",
style="color:#0f284f; position:relative; top:18px; left:10px;")
tmp = hmakeTag("div", paste0(img, title))
hwrite(tmp, div=TRUE, heading=1, br=TRUE, page=p)
}else
hwrite("EGSEA Gene Set Testing Report", style="color:#0f284f",
heading=1, br=TRUE, page=p)
hwrite("Analysis Parameters", style="color:#22519b",
heading=2, page=p)
#### write analysis parameters
hwrite(paste0(hmakeTag("b", "Total number of genes: ") ,
length(gs.annots[[1]]$featureIDs)),
br=TRUE, page=p)
hwrite(paste0(hmakeTag("b", "Total number of samples: " ),
sampleSize), br=TRUE, page=p)
hwrite(paste0(hmakeTag("b", "Number of contrasts: ") ,contr.num),
br=TRUE, page=p)
# Format each base method as "name (package:version)".
base.names = names(baseInfo)
base.vers = sapply(base.names, function(x) as.character(baseInfo[[x]]$version))
base.pkgs = sapply(base.names, function(x) baseInfo[[x]]$package)
baseMethods = paste0(base.names, " (", base.pkgs, ":", base.vers, ")")
hwrite(paste0(hmakeTag("b","Base GSEA methods: ") ,paste(baseMethods,
collapse=", ")), br=TRUE, page=p)
hwrite(paste0(hmakeTag("b","P-value combine method: " ),
combineMethod), br=TRUE, page=p)
hwrite(paste0(hmakeTag("b","Sorting statistic: " ),sort.by), br=TRUE,
page=p)
hwrite(paste0(hmakeTag("b","Fold changes calculated: " ),logFC.cal),
br=TRUE, page=p)
hwrite(paste0(hmakeTag("b","Gene IDs - Symbols mapping used: " ),
ifelse(nrow(symbolsMap) > 0, "Yes",
"No")), br=TRUE, page=p)
hwrite(paste0(hmakeTag("b","Organism: " ), gs.annots[[1]]$species),
br=TRUE, page=p)
gs.cols = paste0(gs.names, " (", gs.versions, ", ", gs.dates, ")")
hwrite(paste0(hmakeTag("b","Gene set collections: " ), paste(gs.cols, collapse=", ")),
br=TRUE, page=p)
hwrite(paste0(hmakeTag("b","EGSEA version: " ), egsea.ver),
br=TRUE, page=p)
hwrite(paste0(hmakeTag("b","EGSEAdata version: " ), egseadata.ver),
br=TRUE, page=p)
# NOTE(review): this hwrite has no page=p argument, so its output is
# returned (and discarded), not written to the page — confirm intent.
hwrite(hmakeTag("br"), br=TRUE)
hwrite("Analysis Results", style="color:#22519b",
heading=2, page=p)
main = ""
## Main Page Generation
# One bulleted entry per contrast; within each, one <li> per gene set
# collection carrying the links to that collection's sub-pages.
for (i in 1:contr.num){
file.name = paste0(ranked.gs.dir, "/ranked-", gs.labels,
"-gene-sets-",
sub(" - ", "-", contr.names[i]),
'.html')
temp = paste0(gs.names, " (", hmakeTag("a", "Stats Table",
href=file.name))
file.name = paste0(hm.dir,
sub(" - ", "-", contr.names[i]),
'-allHeatmaps.html')
temp = paste0(temp, ", ", hmakeTag("a", "Heatmaps" ,
href=file.name))
# Pathways link only for KEGG collections.
kegg.idx = grep("^kegg", gs.labels)
if (length(kegg.idx) != 0){
file.name = paste0(pv.dir,
sub(" - ", "-", contr.names[i]),
'-allPathways.html')
temp[kegg.idx] = paste0(temp[kegg.idx], ", ",
hmakeTag("a", "Pathways" , href=file.name))
}
# GO graphs link only for GO-based collections (c5 / gsdbgo).
go.idx = c(grep("^c5", gs.labels), grep("^gsdbgo", gs.labels))
if (length(go.idx) !=0 ){
file.name = paste0(go.dir,
sub(" - ", "-", contr.names[i]),
"-", gs.labels[go.idx], '-allGOgraphs.html')
temp[go.idx] = paste0(temp[go.idx], ", ", hmakeTag("a",
"GO Graphs" , href=file.name))
}
file.name = paste0(summary.dir, sub(" - ", "-",
contr.names[i]), "-", gs.labels,
"-summary.html")
temp = paste0(temp, ", ", hmakeTag("a", "Summary Plots",
href=file.name))
file.name = paste0(ranked.gs.dir, "/ranked-", gs.labels,
"-gene-sets-",
sub(" - ", "-", contr.names[i]), '.txt')
# NOTE: "Download\nStats" contains an embedded newline; HTML renders
# it as a space.
temp = paste0(temp, ", ", hmakeTag("a", "Download
Stats",target="_blank", href=file.name))
temp = paste0(temp, ")")
temp = hmakeTag("il", paste(hmakeTag("b",
contr.names[i]), hmakeTag("ul", paste(hmakeTag("li", temp),
collapse="\n")), sep="\n"))
main = paste(main, temp, sep="\n")
}
# With more than one contrast, append an equivalent "Comparison
# Analysis" entry linking to the comparison sub-pages.
if (contr.num > 1){
file.name = paste0(ranked.gs.dir, "/ranked-", gs.labels,
"-gene-sets-compare.html")
temp = paste0(gs.names, " (", hmakeTag("a", "Stats Table",
href=file.name))
file.name = paste0(hm.dir, 'allHeatmaps.html')
temp = paste0(temp, ", ", hmakeTag("a", "Heatmaps" ,
href=file.name))
kegg.idx = grep("^kegg", gs.labels)
if (length(kegg.idx) != 0){
file.name = paste0(pv.dir, 'allPathways.html')
temp[kegg.idx] = paste0(temp[kegg.idx], ", ",
hmakeTag("a", "Pathways" , href=file.name))
}
go.idx = c(grep("^c5", gs.labels), grep("^gsdbgo", gs.labels))
if (length(go.idx) !=0 ){
file.name = paste0(go.dir, gs.labels[go.idx], '-allGOgraphs.html')
temp[go.idx] = paste0(temp[go.idx], ", ", hmakeTag("a",
"GO Graphs" , href=file.name))
}
file.name = paste0(summary.dir, gs.labels, "-summary.html")
temp = paste0(temp, ", ", hmakeTag("a", "Summary Plots",
href=file.name))
file.name = paste0(ranked.gs.dir, "/ranked-", gs.labels,
"-gene-sets-compare.txt")
temp = paste0(temp, ", ", hmakeTag("a", "Download Stats" ,
target="_blank", href=file.name))
temp = paste0(temp, ")")
temp = hmakeTag("il", paste(hmakeTag("b", "Comparison
Analysis"), hmakeTag("ul", paste(hmakeTag("li", temp), collapse="\n")),
sep="\n"))
main = paste(main, temp, sep="\n")
}
hwrite(hmakeTag("ul", main, style="list-style-type:square"), page=p)
# NOTE(review): this hwrite also lacks page=p, so the footer is never
# written to the report — confirm whether that is intended.
hwrite(hmakeTag("footer", "Report generated by EGSEA package. For any
inquiry, please contact the authors."))
closePage(p)
}
|
# Integration test for workflowr: replays the "getting started" vignette
# end-to-end (start project, build site, view, open a new analysis,
# rebuild, publish) inside a throw-away temporary project directory.
# Tests run sequentially and share state (site_dir, r, s, html, ...).
context("vig-getting-started")
# Testing similar workflow as in getting-started vignette
library("git2r")
# start project in a tempdir
site_dir <- tempfile("new-")
suppressMessages(wflow_start(site_dir, change_wd = FALSE))
# Clean up the temporary project when this file finishes sourcing.
on.exit(unlink(site_dir, recursive = TRUE, force = TRUE))
site_dir <- workflowr:::absolute(site_dir)
r <- repository(path = site_dir)
s <- wflow_status(project = site_dir)
# wflow_start should scaffold the Git repo, analysis dir, site config,
# index page and RStudio project file, with exactly one initial commit.
test_that("wflow_start provides necessary infrastructure", {
expect_true(dir.exists(file.path(site_dir, ".git")))
expect_true(dir.exists(file.path(site_dir, "analysis")))
expect_true(file.exists(file.path(site_dir, "analysis/_site.yml")))
expect_true(file.exists(file.path(site_dir, "analysis/index.Rmd")))
expect_true(file.exists(file.path(site_dir,
paste0(basename(site_dir), ".Rproj"))))
expect_true(length(commits(r)) == 1)
})
rmd <- rownames(s$status)
stopifnot(length(rmd) > 0)
# Expected html files
html <- workflowr:::to_html(rmd, outdir = s$docs)
# A second build with nothing changed should be a silent no-op.
test_that("wflow_build builds the website, but only once", {
suppressMessages(o <- wflow_build(view = FALSE, project = site_dir))
expect_identical(o$built, rmd)
expect_true(all(file.exists(html)))
expect_silent(o <- wflow_build(view = FALSE, project = site_dir))
})
test_that("wflow_view opens website.", {
expected <- file.path(s$docs, "index.html")
actual <- wflow_view(dry_run = TRUE, project = site_dir)
expect_identical(actual, expected)
})
# Add a new analysis file by copying the packaged template.
# NOTE(review): the relative path "files/..." assumes the working
# directory is the testthat tests directory.
test_rmd <- file.path(s$analysis, "first-analysis.Rmd")
file.copy("files/workflowr-template.Rmd", test_rmd)
# Expected html file
test_html <- workflowr:::to_html(test_rmd, outdir = s$docs)
s <- wflow_status(project = site_dir)
test_that("wflow_open sets correct working directory", {
cwd <- getwd()
# Restore the working directory even if the expectation fails.
on.exit(setwd(cwd))
wflow_open(files = basename(test_rmd), change_wd = TRUE,
open_file = FALSE, project = site_dir)
expect_identical(getwd(), file.path(site_dir, "analysis"))
})
# An incremental build must touch only the new file; the Sys.sleep(2)
# guarantees a detectable mtime difference would exist if it rebuilt.
test_that("wflow_build only builds new file", {
html_mtime_pre <- file.mtime(html)
Sys.sleep(2)
suppressMessages(o <- wflow_build(view = FALSE, project = site_dir))
expect_identical(o$built, test_rmd)
expect_true(file.exists(test_html))
html_mtime_post <- file.mtime(html)
expect_identical(html_mtime_pre, html_mtime_post)
expect_silent(wflow_build(view = FALSE, project = site_dir))
})
test_that("wflow_view can open specific file with Rmd extension & without path.", {
expected <- file.path(s$docs, "first-analysis.html")
actual <- wflow_view("first-analysis.Rmd", dry_run = TRUE, project = site_dir)
expect_identical(actual, expected)
})
all_rmd <- rownames(s$status)
all_html <- workflowr:::to_html(all_rmd, outdir = s$docs)
# Publishing should rebuild everything and add two commits on top of the
# initial one: the source commit ("first analysis") then "Build site.".
test_that("wflow_publish can commit new file and website", {
html_mtime_pre <- file.mtime(all_html)
Sys.sleep(2)
expect_message(o <- wflow_publish(all_rmd,
message = "first analysis",
project = site_dir))
expect_identical(o$step2$built, all_rmd)
expect_true(all(file.exists(all_html)))
html_mtime_post <- file.mtime(all_html)
expect_true(all(html_mtime_pre < html_mtime_post))
log <- commits(r)
# browser()
expect_true(length(log) == 3)
expect_identical(log[[1]]@message, "Build site.")
expect_identical(log[[2]]@message, "first analysis")
expect_silent(wflow_build(make = TRUE, update = TRUE, view = FALSE,
project = site_dir))
})
|
/tests/testthat/test-vig-getting-started.R
|
permissive
|
anorris8/workflowr
|
R
| false
| false
| 3,478
|
r
|
# Integration test for workflowr: replays the "getting started" vignette
# end-to-end (start project, build site, view, open a new analysis,
# rebuild, publish) inside a throw-away temporary project directory.
# Tests run sequentially and share state (site_dir, r, s, html, ...).
context("vig-getting-started")
# Testing similar workflow as in getting-started vignette
library("git2r")
# start project in a tempdir
site_dir <- tempfile("new-")
suppressMessages(wflow_start(site_dir, change_wd = FALSE))
# Clean up the temporary project when this file finishes sourcing.
on.exit(unlink(site_dir, recursive = TRUE, force = TRUE))
site_dir <- workflowr:::absolute(site_dir)
r <- repository(path = site_dir)
s <- wflow_status(project = site_dir)
# wflow_start should scaffold the Git repo, analysis dir, site config,
# index page and RStudio project file, with exactly one initial commit.
test_that("wflow_start provides necessary infrastructure", {
expect_true(dir.exists(file.path(site_dir, ".git")))
expect_true(dir.exists(file.path(site_dir, "analysis")))
expect_true(file.exists(file.path(site_dir, "analysis/_site.yml")))
expect_true(file.exists(file.path(site_dir, "analysis/index.Rmd")))
expect_true(file.exists(file.path(site_dir,
paste0(basename(site_dir), ".Rproj"))))
expect_true(length(commits(r)) == 1)
})
rmd <- rownames(s$status)
stopifnot(length(rmd) > 0)
# Expected html files
html <- workflowr:::to_html(rmd, outdir = s$docs)
# A second build with nothing changed should be a silent no-op.
test_that("wflow_build builds the website, but only once", {
suppressMessages(o <- wflow_build(view = FALSE, project = site_dir))
expect_identical(o$built, rmd)
expect_true(all(file.exists(html)))
expect_silent(o <- wflow_build(view = FALSE, project = site_dir))
})
test_that("wflow_view opens website.", {
expected <- file.path(s$docs, "index.html")
actual <- wflow_view(dry_run = TRUE, project = site_dir)
expect_identical(actual, expected)
})
# Add a new analysis file by copying the packaged template.
# NOTE(review): the relative path "files/..." assumes the working
# directory is the testthat tests directory.
test_rmd <- file.path(s$analysis, "first-analysis.Rmd")
file.copy("files/workflowr-template.Rmd", test_rmd)
# Expected html file
test_html <- workflowr:::to_html(test_rmd, outdir = s$docs)
s <- wflow_status(project = site_dir)
test_that("wflow_open sets correct working directory", {
cwd <- getwd()
# Restore the working directory even if the expectation fails.
on.exit(setwd(cwd))
wflow_open(files = basename(test_rmd), change_wd = TRUE,
open_file = FALSE, project = site_dir)
expect_identical(getwd(), file.path(site_dir, "analysis"))
})
# An incremental build must touch only the new file; the Sys.sleep(2)
# guarantees a detectable mtime difference would exist if it rebuilt.
test_that("wflow_build only builds new file", {
html_mtime_pre <- file.mtime(html)
Sys.sleep(2)
suppressMessages(o <- wflow_build(view = FALSE, project = site_dir))
expect_identical(o$built, test_rmd)
expect_true(file.exists(test_html))
html_mtime_post <- file.mtime(html)
expect_identical(html_mtime_pre, html_mtime_post)
expect_silent(wflow_build(view = FALSE, project = site_dir))
})
test_that("wflow_view can open specific file with Rmd extension & without path.", {
expected <- file.path(s$docs, "first-analysis.html")
actual <- wflow_view("first-analysis.Rmd", dry_run = TRUE, project = site_dir)
expect_identical(actual, expected)
})
all_rmd <- rownames(s$status)
all_html <- workflowr:::to_html(all_rmd, outdir = s$docs)
# Publishing should rebuild everything and add two commits on top of the
# initial one: the source commit ("first analysis") then "Build site.".
test_that("wflow_publish can commit new file and website", {
html_mtime_pre <- file.mtime(all_html)
Sys.sleep(2)
expect_message(o <- wflow_publish(all_rmd,
message = "first analysis",
project = site_dir))
expect_identical(o$step2$built, all_rmd)
expect_true(all(file.exists(all_html)))
html_mtime_post <- file.mtime(all_html)
expect_true(all(html_mtime_pre < html_mtime_post))
log <- commits(r)
# browser()
expect_true(length(log) == 3)
expect_identical(log[[1]]@message, "Build site.")
expect_identical(log[[2]]@message, "first analysis")
expect_silent(wflow_build(make = TRUE, update = TRUE, view = FALSE,
project = site_dir))
})
|
## makeCacheMatrix is a function that takes a square matrix as an argument and returns an object with a group of functions actionable upon it.
## this group of functions include set, get, setInverse, and getInverse
## by returning a list, the object's set of functions can be accessed by using the $ symbol
## Construct a matrix wrapper capable of memoising its inverse.
## Returns a list of four closures that share one enclosing environment:
##   set(y)              -- replace the stored matrix and invalidate the cache
##   get()               -- return the stored matrix
##   setInverse(Inverse) -- store a computed inverse in the cache
##   getInverse()        -- return the cached inverse, or NULL if unset
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    ## Superassignment updates the shared environment, so every closure
    ## sees the new matrix and the cleared cache.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(Inverse) {
    cached_inverse <<- Inverse
  }
  getInverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Return the inverse of the special "cache matrix" object created by
## makeCacheMatrix(). The inverse is computed at most once: the first
## call calculates it with solve() and stores it in the object's cache;
## later calls return the cached value (announced via a message).
##
## Args:
##   x:   a list produced by makeCacheMatrix(), wrapping an invertible
##        square matrix.
##   ...: further arguments passed on to solve() (e.g. tol). Added with
##        a default-empty ... so existing single-argument calls are
##        unaffected.
##
## Returns: a matrix that is the inverse of the matrix stored in 'x'.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getInverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setInverse(m)
  m
}
|
/cachematrix.R
|
no_license
|
wergild-1/ProgrammingAssignment2
|
R
| false
| false
| 1,036
|
r
|
## makeCacheMatrix is a function that takes a square matrix as an argument and returns an object with a group of functions actionable upon it.
## this group of functions include set, get, setInverse, and getInverse
## by returning a list, the object's set of functions can be accessed by using the $ symbol
## Construct a matrix wrapper capable of memoising its inverse.
## Returns a list of four closures that share one enclosing environment:
##   set(y)              -- replace the stored matrix and invalidate the cache
##   get()               -- return the stored matrix
##   setInverse(Inverse) -- store a computed inverse in the cache
##   getInverse()        -- return the cached inverse, or NULL if unset
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    ## Superassignment updates the shared environment, so every closure
    ## sees the new matrix and the cleared cache.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(Inverse) {
    cached_inverse <<- Inverse
  }
  getInverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Return the inverse of the special "cache matrix" object created by
## makeCacheMatrix(). The inverse is computed at most once: the first
## call calculates it with solve() and stores it in the object's cache;
## later calls return the cached value (announced via a message).
##
## Args:
##   x:   a list produced by makeCacheMatrix(), wrapping an invertible
##        square matrix.
##   ...: further arguments passed on to solve() (e.g. tol). Added with
##        a default-empty ... so existing single-argument calls are
##        unaffected.
##
## Returns: a matrix that is the inverse of the matrix stored in 'x'.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getInverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setInverse(m)
  m
}
|
\name{continue_OCN}
\alias{continue_OCN}
\title{Perform OCN Search Algorithm on an Existing OCN}
\usage{
continue_OCN(OCN,nNewIter, coolingRate=NULL, initialNoCoolingPhase=0,
displayUpdates=1, showIntermediatePlots=FALSE, thrADraw=NULL,
easyDraw=NULL, nUpdates=50)
}
\arguments{
\item{OCN}{A \code{river} object (as generated by \code{\link{create_OCN}}).}
\item{nNewIter}{Number of iterations that the OCN search algorithm performs.}
\item{coolingRate}{Parameter of the function used to describe the temperature of the simulated annealing algorithm. See \code{\link{create_OCN}}. If \code{NULL}, it is set equal to the last element of \code{OCN$coolingRate}.}
\item{initialNoCoolingPhase}{Parameter of the function used to describe the temperature of the simulated annealing algorithm. See \code{\link{create_OCN}}.}
\item{nUpdates}{Number of updates given during the OCN search process (only effective if \code{any(displayUpdates,showIntermediatePlots)=TRUE}.).}
\item{showIntermediatePlots}{If \code{TRUE}, the OCN plot is updated \code{nUpdates} times during the OCN search process.
Note that, for large lattices, \code{showIntermediatePlots = TRUE} might slow down the search process considerably (especially when \code{easyDraw = FALSE}).}
\item{thrADraw}{Threshold drainage area value used to display the network (only effective when \code{showIntermediatePlots = TRUE}).}
\item{easyDraw}{Logical. If \code{TRUE}, the whole network is displayed (when \code{showIntermediatePlots = TRUE}), and pixels with drainage area lower than \code{thrADraw} are displayed in light gray. If \code{FALSE}, only pixels with drainage area greater or equal to \code{thrADraw} are displayed. Default is \code{FALSE} if \code{dimX*dimY <= 40000}, and \code{TRUE} otherwise. Note that setting \code{easyDraw = FALSE} for large networks might slow down the process considerably.}
\item{displayUpdates}{State if updates are printed on the console while the OCN search algorithm runs.
\describe{
\item{\code{0}}{No update is given.}
\item{\code{1}}{An estimate of duration is given (only if \code{dimX*dimY > 1000}, otherwise no update is given).}
\item{\code{2}}{Progress updates are given. The number of these is controlled by \code{nUpdates}}
}}
}
\value{
A \code{river} object analogous to the input \code{OCN}. Note that, unlike in \code{\link{create_OCN}}, \code{OCN$coolingRate} and \code{OCN$initialNoCoolingPhase} are now vectors (of length equal to the number of times \code{\link{continue_OCN}} has been performed on the same OCN, plus one) that store the full sequence of \code{coolingRate}, \code{initialNoCoolingPhase} used to generate the OCN. Additionally, the vector \code{OCN$nIterSequence} is provided, with entries equal to the number of iterations performed by each successive application of \code{\link{create_OCN}} or \code{\link{continue_OCN}}. It is \code{OCN$nIter = sum(OCN$nIterSequence)}.
}
\description{
Function that performs the OCN search algorithm on an existing OCN.
}
\examples{
set.seed(1)
OCN_a <- create_OCN(20, 20, nIter = 10000)
set.seed(1)
OCN_b <- create_OCN(20, 20, nIter = 5000)
OCN_b <- continue_OCN(OCN_b, nNewIter = 5000)
old.par <- par(no.readonly = TRUE)
par(mfrow=c(1,2))
draw_simple_OCN(OCN_a)
draw_simple_OCN(OCN_b) # the two OCNs are equal
par(old.par)
}
|
/man/continue_OCN.Rd
|
no_license
|
lucarraro/OCNet
|
R
| false
| false
| 3,306
|
rd
|
\name{continue_OCN}
\alias{continue_OCN}
\title{Perform OCN Search Algorithm on an Existing OCN}
\usage{
continue_OCN(OCN,nNewIter, coolingRate=NULL, initialNoCoolingPhase=0,
displayUpdates=1, showIntermediatePlots=FALSE, thrADraw=NULL,
easyDraw=NULL, nUpdates=50)
}
\arguments{
\item{OCN}{A \code{river} object (as generated by \code{\link{create_OCN}}).}
\item{nNewIter}{Number of iterations that the OCN search algorithm performs.}
\item{coolingRate}{Parameter of the function used to describe the temperature of the simulated annealing algorithm. See \code{\link{create_OCN}}. If \code{NULL}, it is set equal to the last element of \code{OCN$coolingRate}.}
\item{initialNoCoolingPhase}{Parameter of the function used to describe the temperature of the simulated annealing algorithm. See \code{\link{create_OCN}}.}
\item{nUpdates}{Number of updates given during the OCN search process (only effective if \code{any(displayUpdates, showIntermediatePlots) = TRUE}).}
\item{showIntermediatePlots}{If \code{TRUE}, the OCN plot is updated \code{nUpdates} times during the OCN search process.
Note that, for large lattices, \code{showIntermediatePlots = TRUE} might slow down the search process considerably (especially when \code{easyDraw = FALSE}).}
\item{thrADraw}{Threshold drainage area value used to display the network (only effective when \code{showIntermediatePlots = TRUE}).}
\item{easyDraw}{Logical. If \code{TRUE}, the whole network is displayed (when \code{showIntermediatePlots = TRUE}), and pixels with drainage area lower than \code{thrADraw} are displayed in light gray. If \code{FALSE}, only pixels with drainage area greater or equal to \code{thrADraw} are displayed. Default is \code{FALSE} if \code{dimX*dimY <= 40000}, and \code{TRUE} otherwise. Note that setting \code{easyDraw = FALSE} for large networks might slow down the process considerably.}
\item{displayUpdates}{State if updates are printed on the console while the OCN search algorithm runs.
\describe{
\item{\code{0}}{No update is given.}
\item{\code{1}}{An estimate of duration is given (only if \code{dimX*dimY > 1000}, otherwise no update is given).}
\item{\code{2}}{Progress updates are given. The number of these is controlled by \code{nUpdates}}
}}
}
\value{
A \code{river} object analogous to the input \code{OCN}. Note that, unlike in \code{\link{create_OCN}}, \code{OCN$coolingRate} and \code{OCN$initialNoCoolingPhase} are now vectors (of length equal to the number of times \code{\link{continue_OCN}} has been performed on the same OCN, plus one) that store the full sequence of \code{coolingRate}, \code{initialNoCoolingPhase} used to generate the OCN. Additionally, the vector \code{OCN$nIterSequence} is provided, with entries equal to the number of iterations performed by each successive application of \code{\link{create_OCN}} or \code{\link{continue_OCN}}. It is \code{OCN$nIter = sum(OCN$nIterSequence)}.
}
\description{
Function that performs the OCN search algorithm on an existing OCN.
}
\examples{
set.seed(1)
OCN_a <- create_OCN(20, 20, nIter = 10000)
set.seed(1)
OCN_b <- create_OCN(20, 20, nIter = 5000)
OCN_b <- continue_OCN(OCN_b, nNewIter = 5000)
old.par <- par(no.readonly = TRUE)
par(mfrow=c(1,2))
draw_simple_OCN(OCN_a)
draw_simple_OCN(OCN_b) # the two OCNs are equal
par(old.par)
}
|
#' Sign out from your 'shiny' app
#'
#' Call this function to sign a user out of your 'shiny' app. This function should
#' be called inside the server function of your 'shiny' app. See
#' \url{https://github.com/Tychobra/polished/blob/master/inst/examples/polished_example_01/server.R}
#' for an example of this function being called after the user clicks a "Sign Out"
#' button.
#'
#' @param session the Shiny session
#' @param redirect_page the query string for the page that the user should be redirected
#' to after signing out.
#'
#' @return the value of \code{shiny::updateQueryString()}; this function is
#' called for its side effects (signing the user out and updating the query
#' string). Throws an error if \code{session$userData$user()} is \code{NULL}.
#'
#' @export
#'
#' @importFrom shiny updateQueryString getDefaultReactiveDomain
#'
#'
#'
sign_out_from_shiny <- function(
session = shiny::getDefaultReactiveDomain(),
redirect_page = "?page=sign_in"
) {
user <- session$userData$user()
if (is.null(user)) stop("session$userData$user() does not exist", call. = FALSE)
# sign the user out of polished's session store
# NOTE(review): .global_sessions is a package-internal object defined elsewhere
.global_sessions$sign_out(user$hashed_cookie, user$session_uid)
# replace the query string so the browser lands on the sign-in page
shiny::updateQueryString(
queryString = redirect_page,
session = session,
mode = "replace"
)
}
|
/R/sign_out_from_shiny.R
|
no_license
|
Yaswanth-Tippireddy/polished
|
R
| false
| false
| 1,113
|
r
|
#' Sign out from your 'shiny' app
#'
#' Call this function to sign a user out of your 'shiny' app. This function should
#' be called inside the server function of your 'shiny' app. See
#' \url{https://github.com/Tychobra/polished/blob/master/inst/examples/polished_example_01/server.R}
#' for an example of this function being called after the user clicks a "Sign Out"
#' button.
#'
#' @param session the Shiny session
#' @param redirect_page the query string for the page that the user should be redirected
#' to after signing out.
#'
#' @return the value of \code{shiny::updateQueryString()}; this function is
#' called for its side effects (signing the user out and updating the query
#' string). Throws an error if \code{session$userData$user()} is \code{NULL}.
#'
#' @export
#'
#' @importFrom shiny updateQueryString getDefaultReactiveDomain
#'
#'
#'
sign_out_from_shiny <- function(
session = shiny::getDefaultReactiveDomain(),
redirect_page = "?page=sign_in"
) {
user <- session$userData$user()
if (is.null(user)) stop("session$userData$user() does not exist", call. = FALSE)
# sign the user out of polished's session store
# NOTE(review): .global_sessions is a package-internal object defined elsewhere
.global_sessions$sign_out(user$hashed_cookie, user$session_uid)
# replace the query string so the browser lands on the sign-in page
shiny::updateQueryString(
queryString = redirect_page,
session = session,
mode = "replace"
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/team_scrape.R
\name{GetTeamShotDashboard}
\alias{GetTeamShotDashboard}
\title{Team Shot Dashboard}
\usage{
GetTeamShotDashboard(split = "shot clock", ...)
}
\value{
data frame with team information by a variety of splits
}
\description{
Team Shot Dashboard
}
\examples{
# GetTeamShotDashboard(split = 'shot clock', TeamID = '1610612756')
}
\keyword{dashboard}
\keyword{shot}
\keyword{team}
|
/man/GetTeamShotDashboard.Rd
|
no_license
|
Zero2848/nbaTools
|
R
| false
| true
| 468
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/team_scrape.R
\name{GetTeamShotDashboard}
\alias{GetTeamShotDashboard}
\title{Team Shot Dashboard}
\usage{
GetTeamShotDashboard(split = "shot clock", ...)
}
\value{
data frame with team information by a variety of splits
}
\description{
Team Shot Dashboard
}
\examples{
# GetTeamShotDashboard(split = 'shot clock', TeamID = '1610612756')
}
\keyword{dashboard}
\keyword{shot}
\keyword{team}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/overlayDetections.R
\name{rasterText}
\alias{rasterText}
\title{Raster the text in an image}
\usage{
rasterText(img, ...)
}
\arguments{
\item{img}{an \link[EBImage:Image-class]{Image} or \link{Image2}}
\item{\dots}{the parameters for \link[graphics:text]{text}}
}
\value{
an object of \link[EBImage:Image-class]{Image}.
}
\description{
add text labels to an image
}
\author{
Jianhong Ou
}
|
/man/rasterText.Rd
|
no_license
|
jianhong/cellCounter
|
R
| false
| true
| 466
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/overlayDetections.R
\name{rasterText}
\alias{rasterText}
\title{Raster the text in an image}
\usage{
rasterText(img, ...)
}
\arguments{
\item{img}{an \link[EBImage:Image-class]{Image} or \link{Image2}}
\item{\dots}{the parameters for \link[graphics:text]{text}}
}
\value{
an object of \link[EBImage:Image-class]{Image}.
}
\description{
add text labels to an image
}
\author{
Jianhong Ou
}
|
## These two functions cooperate to cache the inverse of a matrix.
## makeCasheMatrix (sic -- the misspelled name is kept so existing callers
## keep working) wraps a matrix in a list of closures that can stash a
## computed inverse but performs no inversion itself.
makeCasheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    # A new matrix invalidates any cached inverse.
    x <<- y
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(minv) {
    inv <<- minv
  }
  getinv <- function() {
    inv
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve: compute (or retrieve from cache) the inverse of the
## cached-matrix object returned by makeCasheMatrix.
##
## BUG FIX: the original read `m <- x$getinv` (no parentheses), which assigned
## the accessor FUNCTION itself -- never NULL -- so the "cached data" branch
## was always taken and the closure, not an inverse, was returned. The
## accessor must be CALLED: `x$getinv()`.
##
## x   - object created by makeCasheMatrix (contents assumed invertible)
## ... - additional arguments forwarded to solve()
cacheSolve<-function(x,...){
m<-x$getinv()
if(!is.null(m)){
message("getting cached data")
return(m)
}
data<-x$get()
m<-solve(data,...)
x$setinv(m)
m
}
|
/cachematrix.R
|
no_license
|
kakakalol/ProgrammingAssignment2
|
R
| false
| false
| 800
|
r
|
## These two functions cooperate to cache the inverse of a matrix.
## makeCasheMatrix (sic -- the misspelled name is kept so existing callers
## keep working) wraps a matrix in a list of closures that can stash a
## computed inverse but performs no inversion itself.
makeCasheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    # A new matrix invalidates any cached inverse.
    x <<- y
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(minv) {
    inv <<- minv
  }
  getinv <- function() {
    inv
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve: compute (or retrieve from cache) the inverse of the
## cached-matrix object returned by makeCasheMatrix.
##
## BUG FIX: the original read `m <- x$getinv` (no parentheses), which assigned
## the accessor FUNCTION itself -- never NULL -- so the "cached data" branch
## was always taken and the closure, not an inverse, was returned. The
## accessor must be CALLED: `x$getinv()`.
##
## x   - object created by makeCasheMatrix (contents assumed invertible)
## ... - additional arguments forwarded to solve()
cacheSolve<-function(x,...){
m<-x$getinv()
if(!is.null(m)){
message("getting cached data")
return(m)
}
data<-x$get()
m<-solve(data,...)
x$setinv(m)
m
}
|
# Minimal Shiny server with no reactive logic yet.
# NOTE(review): `ui` is not defined in this file -- presumably provided by a
# sibling ui.R; confirm, otherwise shinyApp(ui, server) errors at startup.
server <- function(input, output) {}
shinyApp(ui, server)
|
/CrimeMap/server.R
|
no_license
|
BingoLaHaye/CUAHAX2019
|
R
| false
| false
| 58
|
r
|
# Minimal Shiny server with no reactive logic yet.
# NOTE(review): `ui` is not defined in this file -- presumably provided by a
# sibling ui.R; confirm, otherwise shinyApp(ui, server) errors at startup.
server <- function(input, output) {}
shinyApp(ui, server)
|
# Plot3: overlaid sub-metering time series for 2007-02-01..2007-02-02 from the
# UCI "Individual household electric power consumption" data set, copied to
# plot3.png (480x480).
# NOTE(review): hard-coded, user-specific setwd() makes the script
# non-portable -- consider removing it and running from the data directory.
setwd("C:/Users/chandra.nakkalakunta/Documents")
# Dates are matched as raw d/m/Y strings against the unparsed Date column.
date1 <- "1/2/2007"
date2<- "2/2/2007"
inputData<-read.csv("household_power_consumption.txt", header=T, sep=';',na.strings = "?")
subsetData<- subset(inputData, Date %in% c(date1,date2))
subsetData$Date <- as.Date(subsetData$Date, format="%d/%m/%Y")
# Combine date and time into a single POSIXct timestamp for the x axis.
datetime <- paste(as.Date(subsetData$Date), subsetData$Time)
subsetData$Datetime <- as.POSIXct(datetime)
# NOTE(review): the y label reads "Global Active Power" but the series plotted
# are the three sub-metering channels -- label likely should be
# "Energy sub metering"; confirm against the assignment's reference plot.
with(subsetData, {
plot(Sub_metering_1~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Copy the on-screen plot to plot3.png, then close the PNG device.
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
/Plot3.R
|
no_license
|
chandranakkalakunta/Exploratory-Data-Analytics
|
R
| false
| false
| 856
|
r
|
# Plot3: overlaid sub-metering time series for 2007-02-01..2007-02-02 from the
# UCI "Individual household electric power consumption" data set, copied to
# plot3.png (480x480).
# NOTE(review): hard-coded, user-specific setwd() makes the script
# non-portable -- consider removing it and running from the data directory.
setwd("C:/Users/chandra.nakkalakunta/Documents")
# Dates are matched as raw d/m/Y strings against the unparsed Date column.
date1 <- "1/2/2007"
date2<- "2/2/2007"
inputData<-read.csv("household_power_consumption.txt", header=T, sep=';',na.strings = "?")
subsetData<- subset(inputData, Date %in% c(date1,date2))
subsetData$Date <- as.Date(subsetData$Date, format="%d/%m/%Y")
# Combine date and time into a single POSIXct timestamp for the x axis.
datetime <- paste(as.Date(subsetData$Date), subsetData$Time)
subsetData$Datetime <- as.POSIXct(datetime)
# NOTE(review): the y label reads "Global Active Power" but the series plotted
# are the three sub-metering channels -- label likely should be
# "Energy sub metering"; confirm against the assignment's reference plot.
with(subsetData, {
plot(Sub_metering_1~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Copy the on-screen plot to plot3.png, then close the PNG device.
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rfun_naive.R
\name{naive}
\alias{naive}
\title{Simple Linear Models for Rating and Ranking}
\usage{
naive(jpMat, stats = FALSE, ties.method = "average")
}
\arguments{
\item{jpMat}{a Judge-Presenter matrix, or a User-Movie matrix}
\item{stats}{a logical value to indicate whether a linear model should be fitted and the test statistics should be reported}
\item{ties.method}{a character string specifying how ties are treated, including "average", "first", "last", "random", "max", "min", from base::rank}
}
\description{
Calculate ratings and provide rankings using Simple Linear regression
}
\examples{
jpMat <- matrix(data=c(5,4,3,0, 5,5,3,1, 0,0,0,5, 0,0,2,0, 4,0,0,3, 1,0,0,4),
nrow=6,
byrow=TRUE)
result <- naive(jpMat)
print(result)
}
\references{
Gou, J. and Wu, S. (2020). A Judging System for Project Showcase: Rating and Ranking with Incomplete Information. Technical Report.
}
\author{
Jiangtao Gou
Shuyi Wu
}
|
/man/naive.Rd
|
no_license
|
cran/raincin
|
R
| false
| true
| 1,002
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rfun_naive.R
\name{naive}
\alias{naive}
\title{Simple Linear Models for Rating and Ranking}
\usage{
naive(jpMat, stats = FALSE, ties.method = "average")
}
\arguments{
\item{jpMat}{a Judge-Presenter matrix, or a User-Movie matrix}
\item{stats}{a logical value to indicate whether a linear model should be fitted and the test statistics should be reported}
\item{ties.method}{a character string specifying how ties are treated, including "average", "first", "last", "random", "max", "min", from base::rank}
}
\description{
Calculate ratings and provide rankings using Simple Linear regression
}
\examples{
jpMat <- matrix(data=c(5,4,3,0, 5,5,3,1, 0,0,0,5, 0,0,2,0, 4,0,0,3, 1,0,0,4),
nrow=6,
byrow=TRUE)
result <- naive(jpMat)
print(result)
}
\references{
Gou, J. and Wu, S. (2020). A Judging System for Project Showcase: Rating and Ranking with Incomplete Information. Technical Report.
}
\author{
Jiangtao Gou
Shuyi Wu
}
|
# Auto-generated fuzz/regression input for the internal CNull function
# communities_individual_based_sampling_beta (valgrind test case).
# `m` is deliberately NULL and `in_m` holds extreme double values to probe
# edge-case handling; the result structure is printed for comparison.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.14967427584229e-221, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615834905-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 270
|
r
|
# Auto-generated fuzz/regression input for the internal CNull function
# communities_individual_based_sampling_beta (valgrind test case).
# `m` is deliberately NULL and `in_m` holds extreme double values to probe
# edge-case handling; the result structure is printed for comparison.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.14967427584229e-221, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
# --- Data load and dependencies --------------------------------------------
#links <- read.csv("links.csv")
movies <- read.csv("movies.csv",stringsAsFactors=FALSE)
ratings <- read.csv("ratings.csv")
#tags <- read.csv("tags.csv")
# Install recommenderlab only when it is actually missing, instead of
# unconditionally re-installing it on every run of the script.
if (!requireNamespace("recommenderlab", quietly = TRUE)) {
  install.packages("recommenderlab")
}
library(recommenderlab)
library(ggplot2)
## Data pre-processing
genres <- as.data.frame(movies$genres, stringsAsFactors=FALSE)
library(data.table)
genres2 <- as.data.frame(tstrsplit(genres[,1], '[|]',
type.convert=TRUE),
stringsAsFactors=FALSE)
colnames(genres2) <- c(1:10)
genre_list <- c("Action", "Adventure", "Animation", "Children",
"Comedy", "Crime","Documentary", "Drama", "Fantasy",
"Film-Noir", "Horror", "Musical", "Mystery","Romance",
"Sci-Fi", "Thriller", "War", "Western") # we have 18 genres in total
genre_matrix <- matrix(0,10330,18) #empty matrix, 10330=no of movies+1, 18=no of genres
genre_matrix[1,] <- genre_list #set first row to genre list
colnames(genre_matrix) <- genre_list #set column names to genre list
#iterate through matrix
for (i in 1:nrow(genres2)) {
for (c in 1:ncol(genres2)) {
genmat_col = which(genre_matrix[1,] == genres2[i,c])
genre_matrix[i+1,genmat_col] <- 1
}
}
#convert into dataframe
genre_matrix2 <- as.data.frame(genre_matrix[-1,], stringsAsFactors=FALSE) #remove first row, which was the genre list
for (c in 1:ncol(genre_matrix2)) {
genre_matrix2[,c] <- as.integer(genre_matrix2[,c])
} #convert from characters to integers
#Create a matrix to search for a movie by genre:
years <- as.data.frame(movies$title, stringsAsFactors=FALSE)
library(data.table)
# Return the last n characters of each element of x (vectorized over x).
substrRight <- function(x, n){
  len <- nchar(x)
  substr(x, len - n + 1, len)
}
years <- as.data.frame(substr(substrRight(substrRight(years$`movies$title`, 6),5),1,4))
search_matrix <- cbind(movies[,1], substr(movies[,2],1,nchar(movies[,2])-6), years, genre_matrix2)
colnames(search_matrix) <- c("movieId", "title", "year", genre_list)
write.csv(search_matrix, "search.csv")
search_matrix <- read.csv("search.csv", stringsAsFactors=FALSE)
# Example of search an Action movie produced in 1995:
subset(search_matrix, Action == 1 & year == 1995)$title
## Create a user profile
binaryratings <- ratings
# Ratings of 4 and 5 are mapped to 1 (like); ratings of 3 and below are
# mapped to -1 (dislike).  Vectorized ifelse() replaces the original
# row-by-row for-loop: identical result on column 3 (the rating column),
# far faster on large rating tables.
binaryratings[,3] <- ifelse(binaryratings[,3] > 3, 1, -1)
# convert binaryratings matrix to the correct format:
binaryratings2 <- dcast(binaryratings, movieId~userId, value.var = "rating", na.rm=FALSE)
for (i in 1:ncol(binaryratings2)){
binaryratings2[which(is.na(binaryratings2[,i]) == TRUE),i] <- 0
}
binaryratings2 = binaryratings2[,-1] #remove movieIds col. Rows are movieIds, cols are userIds
#Remove rows that are not rated from movies dataset
movieIds <- length(unique(movies$movieId)) #10329
ratingmovieIds <- length(unique(ratings$movieId)) #10325
movies2 <- movies[-which((movies$movieId %in% ratings$movieId) == FALSE),]
rownames(movies2) <- NULL
#Remove rows that are not rated from genre_matrix2
genre_matrix3 <- genre_matrix2[-which((movies$movieId %in% ratings$movieId) == FALSE),]
rownames(genre_matrix3) <- NULL
# calculate the dot product of the genre matrix and
# the ratings matrix and obtain the user profiles
#Calculate dot product for User Profiles
result = matrix(0,18,668) # here, 668=no of users/raters, 18=no of genres
for (c in 1:ncol(binaryratings2)){
for (i in 1:ncol(genre_matrix3)){
result[i,c] <- sum((genre_matrix3[,i]) * (binaryratings2[,c])) #ratings per genre
}
}
#Convert to Binary scale
for (c in 1:ncol(result)){
for (i in 1:nrow(result)){
if (result[i,c] < 0){
result[i,c] <- 0
}
else {
result[i,c] <- 1
}
}
}
## Assume that users like similar items, and retrieve movies
# that are closest in similarity to a user's profile, which
# represents a user's preference for an item's feature.
# use Jaccard Distance to measure the similarity between user profiles
# The User-Based Collaborative Filtering Approach
library(reshape2)
#Create ratings matrix. Rows = userId, Columns = movieId
ratingmat <- dcast(ratings, userId~movieId, value.var = "rating", na.rm=FALSE)
ratingmat <- as.matrix(ratingmat[,-1]) #remove userIds
# Method: UBCF
# Similarity Calculation Method: Cosine Similarity
# Nearest Neighbors: 30
library(recommenderlab)
#Convert rating matrix into a recommenderlab sparse matrix
ratingmat <- as(ratingmat, "realRatingMatrix")
# Determine how similar the first four users are with each other
# create similarity matrix
similarity_users <- similarity(ratingmat[1:4, ],
method = "cosine",
which = "users")
as.matrix(similarity_users)
image(as.matrix(similarity_users), main = "User similarity")
# compute similarity between
# the first four movies
similarity_items <- similarity(ratingmat[, 1:4], method =
"cosine", which = "items")
as.matrix(similarity_items)
image(as.matrix(similarity_items), main = "Item similarity")
# Exploring values of ratings:
vector_ratings <- as.vector(ratingmat@data)
unique(vector_ratings) # what are unique values of ratings
table_ratings <- table(vector_ratings) # what is the count of each rating value
table_ratings
# Visualize the rating:
vector_ratings <- vector_ratings[vector_ratings != 0] # rating == 0 are NA values
vector_ratings <- factor(vector_ratings)
qplot(vector_ratings) +
ggtitle("Distribution of the ratings")
# Exploring viewings of movies:
views_per_movie <- colCounts(ratingmat) # count views for each movie
table_views <- data.frame(movie = names(views_per_movie),
views = views_per_movie) # create dataframe of views
table_views <- table_views[order(table_views$views,
decreasing = TRUE), ] # sort by number of views
# Bar chart of the six most-viewed movies, labeled with their titles.
# BUG FIX: the original built the labels with
#   subset(movies2, movies2$movieId == table_views$movie)$title
# -- a vectorized `==` between two differently ordered/sized vectors, which
# recycles and returns titles in movies2 order, so bars get wrong or missing
# labels.  match() looks up each plotted id explicitly, and naming the labels
# by movieId lets scale_x_discrete map them to the correct axis levels.
ggplot(table_views[1:6, ], aes(x = movie, y = views)) +
  geom_bar(stat="identity") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  scale_x_discrete(
    labels = setNames(
      movies2$title[match(table_views$movie[1:6], movies2$movieId)],
      table_views$movie[1:6]
    )
  ) +
  ggtitle("Number of views of the top movies")
#Visualizing the matrix:
image(ratingmat, main = "Heatmap of the rating matrix") # hard to read-too many dimensions
image(ratingmat[1:10, 1:15], main = "Heatmap of the first rows and columns")
image(ratingmat[rowCounts(ratingmat) > quantile(rowCounts(ratingmat), 0.99),
colCounts(ratingmat) > quantile(colCounts(ratingmat), 0.99)],
main = "Heatmap of the top users and movies")
#Normalize the data
ratingmat_norm <- normalize(ratingmat)
image(ratingmat_norm[rowCounts(ratingmat_norm) > quantile(rowCounts(ratingmat_norm), 0.99),
colCounts(ratingmat_norm) > quantile(colCounts(ratingmat_norm), 0.99)],
main = "Heatmap of the top users and movies")
#Create UBFC Recommender Model. UBCF stands for User-Based Collaborative Filtering
recommender_model <- Recommender(ratingmat_norm,
method = "UBCF",
param=list(method="Cosine",nn=30))
model_details <- getModel(recommender_model)
model_details$data
recom <- predict(recommender_model,
ratingmat[1],
n=10) #Obtain top 10 recommendations for 1st user in dataset
recom
#recc_matrix <- sapply(recom@items,
# function(x){ colnames(ratingmat)[x] })
#dim(recc_matrix)
recom_list <- as(recom,
"list") #convert recommenderlab object to readable list
#Obtain recommendations
recom_result <- matrix(0,10)
for (i in 1:10){
recom_result[i] <- as.character(subset(movies,
movies$movieId == as.integer(recom_list[[1]][i]))$title)
}
# Evaluation:
evaluation_scheme <- evaluationScheme(ratingmat,
method="cross-validation",
k=5, given=3,
goodRating=5) #k=5 meaning a 5-fold cross validation. given=3 meaning a Given-3 protocol
evaluation_results <- evaluate(evaluation_scheme,
method="UBCF",
n=c(1,3,5,10,15,20))
eval_results <- getConfusionMatrix(evaluation_results)[[1]]
|
/movieRec.R
|
no_license
|
ganeshsai08/project-
|
R
| false
| false
| 8,627
|
r
|
# --- Data load and dependencies --------------------------------------------
#links <- read.csv("links.csv")
movies <- read.csv("movies.csv",stringsAsFactors=FALSE)
ratings <- read.csv("ratings.csv")
#tags <- read.csv("tags.csv")
# Install recommenderlab only when it is actually missing, instead of
# unconditionally re-installing it on every run of the script.
if (!requireNamespace("recommenderlab", quietly = TRUE)) {
  install.packages("recommenderlab")
}
library(recommenderlab)
library(ggplot2)
## Data pre-processing
genres <- as.data.frame(movies$genres, stringsAsFactors=FALSE)
library(data.table)
genres2 <- as.data.frame(tstrsplit(genres[,1], '[|]',
type.convert=TRUE),
stringsAsFactors=FALSE)
colnames(genres2) <- c(1:10)
genre_list <- c("Action", "Adventure", "Animation", "Children",
"Comedy", "Crime","Documentary", "Drama", "Fantasy",
"Film-Noir", "Horror", "Musical", "Mystery","Romance",
"Sci-Fi", "Thriller", "War", "Western") # we have 18 genres in total
genre_matrix <- matrix(0,10330,18) #empty matrix, 10330=no of movies+1, 18=no of genres
genre_matrix[1,] <- genre_list #set first row to genre list
colnames(genre_matrix) <- genre_list #set column names to genre list
#iterate through matrix
for (i in 1:nrow(genres2)) {
for (c in 1:ncol(genres2)) {
genmat_col = which(genre_matrix[1,] == genres2[i,c])
genre_matrix[i+1,genmat_col] <- 1
}
}
#convert into dataframe
genre_matrix2 <- as.data.frame(genre_matrix[-1,], stringsAsFactors=FALSE) #remove first row, which was the genre list
for (c in 1:ncol(genre_matrix2)) {
genre_matrix2[,c] <- as.integer(genre_matrix2[,c])
} #convert from characters to integers
#Create a matrix to search for a movie by genre:
years <- as.data.frame(movies$title, stringsAsFactors=FALSE)
library(data.table)
# Return the last n characters of each element of x (vectorized over x).
substrRight <- function(x, n){
  len <- nchar(x)
  substr(x, len - n + 1, len)
}
years <- as.data.frame(substr(substrRight(substrRight(years$`movies$title`, 6),5),1,4))
search_matrix <- cbind(movies[,1], substr(movies[,2],1,nchar(movies[,2])-6), years, genre_matrix2)
colnames(search_matrix) <- c("movieId", "title", "year", genre_list)
write.csv(search_matrix, "search.csv")
search_matrix <- read.csv("search.csv", stringsAsFactors=FALSE)
# Example of search an Action movie produced in 1995:
subset(search_matrix, Action == 1 & year == 1995)$title
## Create a user profile
binaryratings <- ratings
# Ratings of 4 and 5 are mapped to 1 (like); ratings of 3 and below are
# mapped to -1 (dislike).  Vectorized ifelse() replaces the original
# row-by-row for-loop: identical result on column 3 (the rating column),
# far faster on large rating tables.
binaryratings[,3] <- ifelse(binaryratings[,3] > 3, 1, -1)
# convert binaryratings matrix to the correct format:
binaryratings2 <- dcast(binaryratings, movieId~userId, value.var = "rating", na.rm=FALSE)
for (i in 1:ncol(binaryratings2)){
binaryratings2[which(is.na(binaryratings2[,i]) == TRUE),i] <- 0
}
binaryratings2 = binaryratings2[,-1] #remove movieIds col. Rows are movieIds, cols are userIds
#Remove rows that are not rated from movies dataset
movieIds <- length(unique(movies$movieId)) #10329
ratingmovieIds <- length(unique(ratings$movieId)) #10325
movies2 <- movies[-which((movies$movieId %in% ratings$movieId) == FALSE),]
rownames(movies2) <- NULL
#Remove rows that are not rated from genre_matrix2
genre_matrix3 <- genre_matrix2[-which((movies$movieId %in% ratings$movieId) == FALSE),]
rownames(genre_matrix3) <- NULL
# calculate the dot product of the genre matrix and
# the ratings matrix and obtain the user profiles
#Calculate dot product for User Profiles
result = matrix(0,18,668) # here, 668=no of users/raters, 18=no of genres
for (c in 1:ncol(binaryratings2)){
for (i in 1:ncol(genre_matrix3)){
result[i,c] <- sum((genre_matrix3[,i]) * (binaryratings2[,c])) #ratings per genre
}
}
#Convert to Binary scale
for (c in 1:ncol(result)){
for (i in 1:nrow(result)){
if (result[i,c] < 0){
result[i,c] <- 0
}
else {
result[i,c] <- 1
}
}
}
## Assume that users like similar items, and retrieve movies
# that are closest in similarity to a user's profile, which
# represents a user's preference for an item's feature.
# use Jaccard Distance to measure the similarity between user profiles
# The User-Based Collaborative Filtering Approach
library(reshape2)
#Create ratings matrix. Rows = userId, Columns = movieId
ratingmat <- dcast(ratings, userId~movieId, value.var = "rating", na.rm=FALSE)
ratingmat <- as.matrix(ratingmat[,-1]) #remove userIds
# Method: UBCF
# Similarity Calculation Method: Cosine Similarity
# Nearest Neighbors: 30
library(recommenderlab)
#Convert rating matrix into a recommenderlab sparse matrix
ratingmat <- as(ratingmat, "realRatingMatrix")
# Determine how similar the first four users are with each other
# create similarity matrix
similarity_users <- similarity(ratingmat[1:4, ],
method = "cosine",
which = "users")
as.matrix(similarity_users)
image(as.matrix(similarity_users), main = "User similarity")
# compute similarity between
# the first four movies
similarity_items <- similarity(ratingmat[, 1:4], method =
"cosine", which = "items")
as.matrix(similarity_items)
image(as.matrix(similarity_items), main = "Item similarity")
# Exploring values of ratings:
vector_ratings <- as.vector(ratingmat@data)
unique(vector_ratings) # what are unique values of ratings
table_ratings <- table(vector_ratings) # what is the count of each rating value
table_ratings
# Visualize the rating:
vector_ratings <- vector_ratings[vector_ratings != 0] # rating == 0 are NA values
vector_ratings <- factor(vector_ratings)
qplot(vector_ratings) +
ggtitle("Distribution of the ratings")
# Exploring viewings of movies:
views_per_movie <- colCounts(ratingmat) # count views for each movie
table_views <- data.frame(movie = names(views_per_movie),
views = views_per_movie) # create dataframe of views
table_views <- table_views[order(table_views$views,
decreasing = TRUE), ] # sort by number of views
# Bar chart of the six most-viewed movies, labeled with their titles.
# BUG FIX: the original built the labels with
#   subset(movies2, movies2$movieId == table_views$movie)$title
# -- a vectorized `==` between two differently ordered/sized vectors, which
# recycles and returns titles in movies2 order, so bars get wrong or missing
# labels.  match() looks up each plotted id explicitly, and naming the labels
# by movieId lets scale_x_discrete map them to the correct axis levels.
ggplot(table_views[1:6, ], aes(x = movie, y = views)) +
  geom_bar(stat="identity") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  scale_x_discrete(
    labels = setNames(
      movies2$title[match(table_views$movie[1:6], movies2$movieId)],
      table_views$movie[1:6]
    )
  ) +
  ggtitle("Number of views of the top movies")
#Visualizing the matrix:
image(ratingmat, main = "Heatmap of the rating matrix") # hard to read-too many dimensions
image(ratingmat[1:10, 1:15], main = "Heatmap of the first rows and columns")
image(ratingmat[rowCounts(ratingmat) > quantile(rowCounts(ratingmat), 0.99),
colCounts(ratingmat) > quantile(colCounts(ratingmat), 0.99)],
main = "Heatmap of the top users and movies")
#Normalize the data
ratingmat_norm <- normalize(ratingmat)
image(ratingmat_norm[rowCounts(ratingmat_norm) > quantile(rowCounts(ratingmat_norm), 0.99),
colCounts(ratingmat_norm) > quantile(colCounts(ratingmat_norm), 0.99)],
main = "Heatmap of the top users and movies")
#Create UBFC Recommender Model. UBCF stands for User-Based Collaborative Filtering
recommender_model <- Recommender(ratingmat_norm,
method = "UBCF",
param=list(method="Cosine",nn=30))
model_details <- getModel(recommender_model)
model_details$data
recom <- predict(recommender_model,
ratingmat[1],
n=10) #Obtain top 10 recommendations for 1st user in dataset
recom
#recc_matrix <- sapply(recom@items,
# function(x){ colnames(ratingmat)[x] })
#dim(recc_matrix)
recom_list <- as(recom,
"list") #convert recommenderlab object to readable list
#Obtain recommendations
recom_result <- matrix(0,10)
for (i in 1:10){
recom_result[i] <- as.character(subset(movies,
movies$movieId == as.integer(recom_list[[1]][i]))$title)
}
# Evaluation:
evaluation_scheme <- evaluationScheme(ratingmat,
method="cross-validation",
k=5, given=3,
goodRating=5) #k=5 meaning a 5-fold cross validation. given=3 meaning a Given-3 protocol
evaluation_results <- evaluate(evaluation_scheme,
method="UBCF",
n=c(1,3,5,10,15,20))
eval_results <- getConfusionMatrix(evaluation_results)[[1]]
|
# --- Flag minutes at which an eating bout starts ------------------------------
# Objects assumed from the surrounding script (not visible here -- confirm):
#   dfs4              data frame with (at least) id, MealMinN0 (minute of day,
#                     0-1439) and EnergyKJ; columns 1, 6, 7 are kept below
#   vecid             vector of participant ids; only vecid[1] is processed
#   gap               window length in minutes; energy_threshold in kJ
#   dfidmin           accumulator data frame appended to at the end
dftmp<-dfs4[dfs4$id==vecid[1],]
dftmp<-dftmp[,c(1,6,7)]
# One row per minute of the day; f1 marks a minute whose forward window
# [i, i+gap-1) contains more than `energy_threshold` kJ of intake.
dfmin<-data.frame(min=0:1439)
dfmin$id<-vecid[1]
dfmin$f1<-FALSE
for (i in 0:1439){
if(nrow(dftmp[dftmp$MealMinN0==i,])>=1){
dfsub<-subset(dftmp,(MealMinN0>=i & MealMinN0 <(i+(gap-1))))
if (nrow(dfsub)>0){
# NOTE(review): `sum(dfsub$EnergyKJ)` inside summarise ignores ddply's
# per-id grouping; harmless here because dftmp holds a single id, but it
# would be wrong for multi-id input.
dfsub2<-ddply(dfsub,~id,summarise,Sum=sum(dfsub$EnergyKJ))
if (dfsub2$Sum>energy_threshold){
dfmin$f1[[i+1]]<-TRUE
}
}
}
}
#
# dfsub<-subset(dftmp,(MealMinN0>=900 & MealMinN0 <(900+(gap-1))))
# dfsub2<-aggregate(dfsub$EnergyKJAve,by=list(id=dfsub$id),
# FUN=sum, na.rm=TRUE)
# library(plyr)
# ddply(dfsub,~id,summarise,Sum=sum(dfsub$EnergyKJ))
# f2 keeps only bout *starts*: an f1-flagged minute is suppressed when another
# f1 minute occurred within the previous `gap` minutes.
dfmin$f2<-FALSE
for (i in 1:1440){
if (dfmin$f1[[i]]==TRUE){
dfmin$f2[[i]]<-TRUE
if (i!=1) {
tmp<-min((i-1),gap)
for (j in 1:tmp){
if(dfmin$f1[[i-j]]==TRUE){
dfmin$f2[[i]]<-FALSE
}
}
}
}
}
dfmin$hour<-trunc(dfmin$min/60)
# dfmin$f1<-NULL
# dfmin$min<-NULL
dfmin<-unique(dfmin[dfmin$f2==TRUE,])
# dfmin$f2<-NULL
dfidmin<-rbind(dfidmin,dfmin)
|
/Rcode/trying to understand the looping code.R
|
no_license
|
winterwang/LSHTMproject
|
R
| false
| false
| 1,122
|
r
|
# --- Flag minutes at which an eating bout starts (duplicate copy) -------------
# Objects assumed from the surrounding script (not visible here -- confirm):
#   dfs4              data frame with (at least) id, MealMinN0 (minute of day,
#                     0-1439) and EnergyKJ; columns 1, 6, 7 are kept below
#   vecid             vector of participant ids; only vecid[1] is processed
#   gap               window length in minutes; energy_threshold in kJ
#   dfidmin           accumulator data frame appended to at the end
dftmp<-dfs4[dfs4$id==vecid[1],]
dftmp<-dftmp[,c(1,6,7)]
# One row per minute of the day; f1 marks a minute whose forward window
# [i, i+gap-1) contains more than `energy_threshold` kJ of intake.
dfmin<-data.frame(min=0:1439)
dfmin$id<-vecid[1]
dfmin$f1<-FALSE
for (i in 0:1439){
if(nrow(dftmp[dftmp$MealMinN0==i,])>=1){
dfsub<-subset(dftmp,(MealMinN0>=i & MealMinN0 <(i+(gap-1))))
if (nrow(dfsub)>0){
# NOTE(review): `sum(dfsub$EnergyKJ)` inside summarise ignores ddply's
# per-id grouping; harmless here because dftmp holds a single id, but it
# would be wrong for multi-id input.
dfsub2<-ddply(dfsub,~id,summarise,Sum=sum(dfsub$EnergyKJ))
if (dfsub2$Sum>energy_threshold){
dfmin$f1[[i+1]]<-TRUE
}
}
}
}
#
# dfsub<-subset(dftmp,(MealMinN0>=900 & MealMinN0 <(900+(gap-1))))
# dfsub2<-aggregate(dfsub$EnergyKJAve,by=list(id=dfsub$id),
# FUN=sum, na.rm=TRUE)
# library(plyr)
# ddply(dfsub,~id,summarise,Sum=sum(dfsub$EnergyKJ))
# f2 keeps only bout *starts*: an f1-flagged minute is suppressed when another
# f1 minute occurred within the previous `gap` minutes.
dfmin$f2<-FALSE
for (i in 1:1440){
if (dfmin$f1[[i]]==TRUE){
dfmin$f2[[i]]<-TRUE
if (i!=1) {
tmp<-min((i-1),gap)
for (j in 1:tmp){
if(dfmin$f1[[i-j]]==TRUE){
dfmin$f2[[i]]<-FALSE
}
}
}
}
}
dfmin$hour<-trunc(dfmin$min/60)
# dfmin$f1<-NULL
# dfmin$min<-NULL
dfmin<-unique(dfmin[dfmin$f2==TRUE,])
# dfmin$f2<-NULL
dfidmin<-rbind(dfidmin,dfmin)
|
# --- ProBio adaptive-trial simulation explorer (Shiny app) --------------------
# Pre-computed simulation objects are loaded from www/*.Rdata into the global
# environment; the app only displays them. NOTE(review): `sim_scen1`..
# `sim_scen4`, `scheme_type`, `subtypes`, `signatures`, `X`, `G` and `shape`
# are assumed to come from those .Rdata files -- confirm.
library(shiny)
library(tidyverse)
library(cowplot)
library(knitr)
## load all files
# NOTE(review): `pattern` is a regex, not a glob; '*.Rdata' happens to match
# but '\\.Rdata$' would be the precise form.
files <- list.files(path = "www", pattern = '*.Rdata')
lapply(files, function(x) load(paste0("www/", x), envir = globalenv()))
load("www/param_4.5.RData")
# UI: a scenario selector on top; "Report" embeds a pre-rendered HTML report,
# "Indices" shows month-by-month tables (selected month vs. the next month),
# "Stat" embeds a static PDF describing the model.
ui <- fluidPage(
titlePanel("Exemplification of a simulation study for ProBio"),
hr(),
selectInput("scen", h3("Select scenario"),
choices = list("Scenario 1" = "sim_scen1", "Scenario 2" = "sim_scen2",
"Scenario 3" = "sim_scen3", "Scenario 4" = "sim_scen4"),
selected = "sim_scen1"),
tabsetPanel(
tabPanel("Report",
htmlOutput("frame")
),
tabPanel("Indices",
sliderInput("month", "Select follow-up month",
min = 1, max = 35, value = 1),
tabsetPanel(
tabPanel("Summary",
fluidRow(
#column(4, uiOutput("month_b")),
column(5, uiOutput("month")),
column(5, uiOutput("month_a"))
),
br(),
h3("Randomization probabilities", align = "center"),
fluidRow(
#column(4, uiOutput('r_b'), style = "border-right: 1px solid"),
column(5, uiOutput('r'), style = "border-right: 1px solid"),
# column(5, div(id = "r-container",
# tags$img(src = "spinner.gif",
# id = "loading-spinner"),
# uiOutput('r'))),
column(5, uiOutput('r_a'), style = "border-right: 1px solid")
),
br(),
h3("Randomized participants (subtypes)", align = "center"),
fluidRow(
#column(4, uiOutput('n_tb'), style = "border-right: 1px solid"),
column(5, uiOutput('n_t'), style = "border-right: 1px solid"),
column(5, uiOutput('n_ta'), style = "border-right: 1px solid")
),
br(),
h3("Randomized participants (signatures)", align = "center"),
fluidRow(
#column(4, uiOutput('n_sb'), style = "border-right: 1px solid"),
column(5, uiOutput('n_s'), style = "border-right: 1px solid"),
column(5, uiOutput('n_sa'), style = "border-right: 1px solid")
),
br(),
h3("Number of progressions (subtype)", align = "center"),
fluidRow(
#column(4, uiOutput('delta_tb'), style = "border-right: 1px solid"),
column(5, uiOutput('delta_t'), style = "border-right: 1px solid"),
column(5, uiOutput('delta_ta'), style = "border-right: 1px solid")
),
br(),
h3("Accumulated person-month (subtype)", align = "center"),
fluidRow(
#column(4, uiOutput('time_tb'), style = "border-right: 1px solid"),
column(5, uiOutput('time_t'), style = "border-right: 1px solid"),
column(5, uiOutput('time_ta'), style = "border-right: 1px solid")
),
br(),
h3("Probability of superiority (subtype)", align = "center"),
fluidRow(
#column(4, uiOutput('p_tb'), style = "border-right: 1px solid"),
column(5, uiOutput('p_t'), style = "border-right: 1px solid"),
column(5, uiOutput('p_ta'), style = "border-right: 1px solid")
),
br(),
h3("Probability of superiority (signature)", align = "center"),
fluidRow(
#column(4, uiOutput('p_sb'), style = "border-right: 1px solid"),
column(5,
uiOutput('p_s'),
plotOutput("density_s", height = "600px"),
style = "border-right: 1px solid"),
column(5, uiOutput('p_sa'),
plotOutput("density_sa", height = "600px"),
style = "border-right: 1px solid")
)
),
tabPanel("Data",
uiOutput("dat_title"),
dataTableOutput("dat_m")
)
)
),
tabPanel("Stat",
tags$iframe(src="model_exemplification.pdf", width="900", height="1000")
)
)
)
# Server: every output reads dat(), the pre-loaded simulation object selected
# by name; tables come in pairs -- the chosen month and the month after.
server <- function(input, output, session) {
output$frame <- renderUI({
tags$iframe(src = paste0("./", input$scen, ".html"), # put myMarkdown.html to /www
width = '100%', height = '800px',
frameborder = 0, scrolling = 'auto'
)
})
# Resolve the selected scenario name to the matching global object.
# eval(as.name(...)) is safe here because the choices list is fixed, though
# get(input$scen) would be the more idiomatic lookup.
dat <- reactive({
# if (input$scen == "sim_scen1"){
# sim_scen1
# }
eval(as.name(input$scen))
})
output$month <- renderUI(
h3(paste("Selected Month:", input$month), align = "center")
)
output$month_a <- renderUI(
h3(paste("Month after:", input$month + 1), align = "center")
)
output$r <- renderTable(
dat()$results_inter[[input$month]]$post$r, spacing = "xs", digits = 2, rownames = TRUE
)
output$r_a <- renderTable(
dat()$results_inter[[input$month + 1]]$post$r, spacing = "xs", digits = 2, rownames = TRUE
)
output$n_t <- renderTable(
dat()$results_inter[[input$month]]$dta$n$n_type, spacing = "xs", digits = 0, rownames = TRUE
)
output$n_ta <- renderTable(
dat()$results_inter[[input$month + 1]]$dta$n$n_type, spacing = "xs", digits = 0, rownames = TRUE
)
output$n_s <- renderTable(
dat()$results_inter[[input$month]]$dta$n$n_sign, spacing = "xs", digits = 0, rownames = TRUE
)
output$n_sa <- renderTable(
dat()$results_inter[[input$month + 1]]$dta$n$n_sign, spacing = "xs", digits = 0, rownames = TRUE
)
# delta / PT matrices carry no dimnames in the stored results; borrow them
# from scheme_type so the rendered tables are labelled.
delta_t <- reactive({
delta <- dat()$results_inter[[input$month]]$dta$delta
dimnames(delta) <- dimnames(scheme_type)
delta
})
delta_ta <- reactive({
delta <- dat()$results_inter[[input$month + 1]]$dta$delta
dimnames(delta) <- dimnames(scheme_type)
delta
})
output$delta_t <- renderTable(
delta_t(), spacing = "xs", digits = 0, rownames = TRUE
)
output$delta_ta <- renderTable(
delta_ta(), spacing = "xs", digits = 0, rownames = TRUE
)
time_t <- reactive({
time_t <- dat()$results_inter[[input$month]]$dta$PT
dimnames(time_t) <- dimnames(scheme_type)
time_t
})
time_ta <- reactive({
time_t <- dat()$results_inter[[input$month + 1]]$dta$PT
dimnames(time_t) <- dimnames(scheme_type)
time_t
})
output$time_t <- renderTable(
time_t(), spacing = "xs", digits = 1, rownames = TRUE
)
output$time_ta <- renderTable(
time_ta(), spacing = "xs", digits = 1, rownames = TRUE
)
output$p_t <- renderTable(
dat()$results_inter[[input$month]]$post$p_type, spacing = "xs", digits = 2, rownames = TRUE
)
output$p_ta <- renderTable(
dat()$results_inter[[input$month + 1]]$post$p_type, spacing = "xs", digits = 2, rownames = TRUE
)
output$p_s <- renderTable(
dat()$results_inter[[input$month]]$post$p_sign, spacing = "xs", digits = 2, rownames = TRUE
)
output$p_sa <- renderTable(
dat()$results_inter[[input$month + 1]]$post$p_sign, spacing = "xs", digits = 2, rownames = TRUE
)
# 1000 posterior draws of mean PFS per subtype/treatment cell: draw the rate
# l ~ Gamma(a, b), then transform via 1/l^(1/shape) * gamma(1 + 1/shape)
# (NOTE(review): `shape`, `X`, `G` are globals from the loaded .RData --
# confirm). Cell means are then aggregated to signatures as weighted means
# and plotted as per-signature density curves.
output$density_s <- renderPlot({
set.seed(190211)
mu_dat <- lapply(1:1000, function(i){
unlist(Map(function(a, b){
l <- rgamma(1, shape = a, rate = b)
1/l^(1/shape)*gamma(1 + 1/shape)
}, dat()$results_inter[[input$month]]$post$prior$a,
dat()$results_inter[[input$month]]$post$prior$b
)) %>% matrix(., ncol = X, nrow = G, dimnames = dimnames(scheme_type))
})
mu_sign <- lapply(mu_dat, function(m)
t(apply(subtypes[, 6:(5 + nrow(signatures))], 2, function(x) colSums(m*x)/sum(x)))
) %>% do.call("rbind", .)
gglist <- data.frame(mu_sign) %>%
cbind(signature = rownames(mu_sign)) %>%
gather(trt, pfs, -signature) %>%
mutate(sig = signature) %>%
split(., .$signature) %>%
lapply(., function(d)
ggplot(d, aes(x = pfs, color = trt)) +
geom_line(stat = "density") +
labs(title = d$sig[1]) +
theme(legend.position = "none")
)
legend <- get_legend(gglist[[1]] + theme(legend.position = "top"))
plot_grid(plotlist = c(gglist, list(legend)), nrow = 3, ncol = 2)
})
# Same plot for the following month (different seed).
output$density_sa <- renderPlot({
set.seed(190111)
mu_dat <- lapply(1:1000, function(i){
unlist(Map(function(a, b){
l <- rgamma(1, shape = a, rate = b)
1/l^(1/shape)*gamma(1 + 1/shape)
}, dat()$results_inter[[input$month + 1]]$post$prior$a,
dat()$results_inter[[input$month + 1]]$post$prior$b
)) %>% matrix(., ncol = X, nrow = G, dimnames = dimnames(scheme_type))
})
mu_sign <- lapply(mu_dat, function(m)
t(apply(subtypes[, 6:(5 + nrow(signatures))], 2, function(x) colSums(m*x)/sum(x)))
) %>%
do.call("rbind", .)
gglist <- data.frame(mu_sign) %>%
cbind(signature = rownames(mu_sign)) %>%
gather(trt, pfs, -signature) %>%
mutate(sig = signature) %>%
split(., .$signature) %>%
lapply(., function(d)
ggplot(d, aes(x = pfs, color = trt)) +
geom_line(stat = "density") +
labs(title = d$sig[1]) +
theme(legend.position = "none")
)
legend <- get_legend(gglist[[1]] + theme(legend.position = "top"))
plot_grid(plotlist = c(gglist, list(legend)), nrow = 3, ncol = 2)
})
output$dat_title <- renderUI(
h3(paste("Data up to the end of month", input$month ), align = "center")
)
output$dat_m <- renderDataTable({
dat()$dta$sim_dta
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
/sim_probio/app.R
|
no_license
|
alecri/shiny-server
|
R
| false
| false
| 10,418
|
r
|
# --- ProBio adaptive-trial simulation explorer (Shiny app, duplicate copy) ----
# Pre-computed simulation objects are loaded from www/*.Rdata into the global
# environment; the app only displays them. NOTE(review): `sim_scen1`..
# `sim_scen4`, `scheme_type`, `subtypes`, `signatures`, `X`, `G` and `shape`
# are assumed to come from those .Rdata files -- confirm.
library(shiny)
library(tidyverse)
library(cowplot)
library(knitr)
## load all files
# NOTE(review): `pattern` is a regex, not a glob; '*.Rdata' happens to match
# but '\\.Rdata$' would be the precise form.
files <- list.files(path = "www", pattern = '*.Rdata')
lapply(files, function(x) load(paste0("www/", x), envir = globalenv()))
load("www/param_4.5.RData")
# UI: a scenario selector on top; "Report" embeds a pre-rendered HTML report,
# "Indices" shows month-by-month tables (selected month vs. the next month),
# "Stat" embeds a static PDF describing the model.
ui <- fluidPage(
titlePanel("Exemplification of a simulation study for ProBio"),
hr(),
selectInput("scen", h3("Select scenario"),
choices = list("Scenario 1" = "sim_scen1", "Scenario 2" = "sim_scen2",
"Scenario 3" = "sim_scen3", "Scenario 4" = "sim_scen4"),
selected = "sim_scen1"),
tabsetPanel(
tabPanel("Report",
htmlOutput("frame")
),
tabPanel("Indices",
sliderInput("month", "Select follow-up month",
min = 1, max = 35, value = 1),
tabsetPanel(
tabPanel("Summary",
fluidRow(
#column(4, uiOutput("month_b")),
column(5, uiOutput("month")),
column(5, uiOutput("month_a"))
),
br(),
h3("Randomization probabilities", align = "center"),
fluidRow(
#column(4, uiOutput('r_b'), style = "border-right: 1px solid"),
column(5, uiOutput('r'), style = "border-right: 1px solid"),
# column(5, div(id = "r-container",
# tags$img(src = "spinner.gif",
# id = "loading-spinner"),
# uiOutput('r'))),
column(5, uiOutput('r_a'), style = "border-right: 1px solid")
),
br(),
h3("Randomized participants (subtypes)", align = "center"),
fluidRow(
#column(4, uiOutput('n_tb'), style = "border-right: 1px solid"),
column(5, uiOutput('n_t'), style = "border-right: 1px solid"),
column(5, uiOutput('n_ta'), style = "border-right: 1px solid")
),
br(),
h3("Randomized participants (signatures)", align = "center"),
fluidRow(
#column(4, uiOutput('n_sb'), style = "border-right: 1px solid"),
column(5, uiOutput('n_s'), style = "border-right: 1px solid"),
column(5, uiOutput('n_sa'), style = "border-right: 1px solid")
),
br(),
h3("Number of progressions (subtype)", align = "center"),
fluidRow(
#column(4, uiOutput('delta_tb'), style = "border-right: 1px solid"),
column(5, uiOutput('delta_t'), style = "border-right: 1px solid"),
column(5, uiOutput('delta_ta'), style = "border-right: 1px solid")
),
br(),
h3("Accumulated person-month (subtype)", align = "center"),
fluidRow(
#column(4, uiOutput('time_tb'), style = "border-right: 1px solid"),
column(5, uiOutput('time_t'), style = "border-right: 1px solid"),
column(5, uiOutput('time_ta'), style = "border-right: 1px solid")
),
br(),
h3("Probability of superiority (subtype)", align = "center"),
fluidRow(
#column(4, uiOutput('p_tb'), style = "border-right: 1px solid"),
column(5, uiOutput('p_t'), style = "border-right: 1px solid"),
column(5, uiOutput('p_ta'), style = "border-right: 1px solid")
),
br(),
h3("Probability of superiority (signature)", align = "center"),
fluidRow(
#column(4, uiOutput('p_sb'), style = "border-right: 1px solid"),
column(5,
uiOutput('p_s'),
plotOutput("density_s", height = "600px"),
style = "border-right: 1px solid"),
column(5, uiOutput('p_sa'),
plotOutput("density_sa", height = "600px"),
style = "border-right: 1px solid")
)
),
tabPanel("Data",
uiOutput("dat_title"),
dataTableOutput("dat_m")
)
)
),
tabPanel("Stat",
tags$iframe(src="model_exemplification.pdf", width="900", height="1000")
)
)
)
# Server: every output reads dat(), the pre-loaded simulation object selected
# by name; tables come in pairs -- the chosen month and the month after.
server <- function(input, output, session) {
output$frame <- renderUI({
tags$iframe(src = paste0("./", input$scen, ".html"), # put myMarkdown.html to /www
width = '100%', height = '800px',
frameborder = 0, scrolling = 'auto'
)
})
# Resolve the selected scenario name to the matching global object.
# eval(as.name(...)) is safe here because the choices list is fixed, though
# get(input$scen) would be the more idiomatic lookup.
dat <- reactive({
# if (input$scen == "sim_scen1"){
# sim_scen1
# }
eval(as.name(input$scen))
})
output$month <- renderUI(
h3(paste("Selected Month:", input$month), align = "center")
)
output$month_a <- renderUI(
h3(paste("Month after:", input$month + 1), align = "center")
)
output$r <- renderTable(
dat()$results_inter[[input$month]]$post$r, spacing = "xs", digits = 2, rownames = TRUE
)
output$r_a <- renderTable(
dat()$results_inter[[input$month + 1]]$post$r, spacing = "xs", digits = 2, rownames = TRUE
)
output$n_t <- renderTable(
dat()$results_inter[[input$month]]$dta$n$n_type, spacing = "xs", digits = 0, rownames = TRUE
)
output$n_ta <- renderTable(
dat()$results_inter[[input$month + 1]]$dta$n$n_type, spacing = "xs", digits = 0, rownames = TRUE
)
output$n_s <- renderTable(
dat()$results_inter[[input$month]]$dta$n$n_sign, spacing = "xs", digits = 0, rownames = TRUE
)
output$n_sa <- renderTable(
dat()$results_inter[[input$month + 1]]$dta$n$n_sign, spacing = "xs", digits = 0, rownames = TRUE
)
# delta / PT matrices carry no dimnames in the stored results; borrow them
# from scheme_type so the rendered tables are labelled.
delta_t <- reactive({
delta <- dat()$results_inter[[input$month]]$dta$delta
dimnames(delta) <- dimnames(scheme_type)
delta
})
delta_ta <- reactive({
delta <- dat()$results_inter[[input$month + 1]]$dta$delta
dimnames(delta) <- dimnames(scheme_type)
delta
})
output$delta_t <- renderTable(
delta_t(), spacing = "xs", digits = 0, rownames = TRUE
)
output$delta_ta <- renderTable(
delta_ta(), spacing = "xs", digits = 0, rownames = TRUE
)
time_t <- reactive({
time_t <- dat()$results_inter[[input$month]]$dta$PT
dimnames(time_t) <- dimnames(scheme_type)
time_t
})
time_ta <- reactive({
time_t <- dat()$results_inter[[input$month + 1]]$dta$PT
dimnames(time_t) <- dimnames(scheme_type)
time_t
})
output$time_t <- renderTable(
time_t(), spacing = "xs", digits = 1, rownames = TRUE
)
output$time_ta <- renderTable(
time_ta(), spacing = "xs", digits = 1, rownames = TRUE
)
output$p_t <- renderTable(
dat()$results_inter[[input$month]]$post$p_type, spacing = "xs", digits = 2, rownames = TRUE
)
output$p_ta <- renderTable(
dat()$results_inter[[input$month + 1]]$post$p_type, spacing = "xs", digits = 2, rownames = TRUE
)
output$p_s <- renderTable(
dat()$results_inter[[input$month]]$post$p_sign, spacing = "xs", digits = 2, rownames = TRUE
)
output$p_sa <- renderTable(
dat()$results_inter[[input$month + 1]]$post$p_sign, spacing = "xs", digits = 2, rownames = TRUE
)
# 1000 posterior draws of mean PFS per subtype/treatment cell: draw the rate
# l ~ Gamma(a, b), then transform via 1/l^(1/shape) * gamma(1 + 1/shape)
# (NOTE(review): `shape`, `X`, `G` are globals from the loaded .RData --
# confirm). Cell means are then aggregated to signatures as weighted means
# and plotted as per-signature density curves.
output$density_s <- renderPlot({
set.seed(190211)
mu_dat <- lapply(1:1000, function(i){
unlist(Map(function(a, b){
l <- rgamma(1, shape = a, rate = b)
1/l^(1/shape)*gamma(1 + 1/shape)
}, dat()$results_inter[[input$month]]$post$prior$a,
dat()$results_inter[[input$month]]$post$prior$b
)) %>% matrix(., ncol = X, nrow = G, dimnames = dimnames(scheme_type))
})
mu_sign <- lapply(mu_dat, function(m)
t(apply(subtypes[, 6:(5 + nrow(signatures))], 2, function(x) colSums(m*x)/sum(x)))
) %>% do.call("rbind", .)
gglist <- data.frame(mu_sign) %>%
cbind(signature = rownames(mu_sign)) %>%
gather(trt, pfs, -signature) %>%
mutate(sig = signature) %>%
split(., .$signature) %>%
lapply(., function(d)
ggplot(d, aes(x = pfs, color = trt)) +
geom_line(stat = "density") +
labs(title = d$sig[1]) +
theme(legend.position = "none")
)
legend <- get_legend(gglist[[1]] + theme(legend.position = "top"))
plot_grid(plotlist = c(gglist, list(legend)), nrow = 3, ncol = 2)
})
# Same plot for the following month (different seed).
output$density_sa <- renderPlot({
set.seed(190111)
mu_dat <- lapply(1:1000, function(i){
unlist(Map(function(a, b){
l <- rgamma(1, shape = a, rate = b)
1/l^(1/shape)*gamma(1 + 1/shape)
}, dat()$results_inter[[input$month + 1]]$post$prior$a,
dat()$results_inter[[input$month + 1]]$post$prior$b
)) %>% matrix(., ncol = X, nrow = G, dimnames = dimnames(scheme_type))
})
mu_sign <- lapply(mu_dat, function(m)
t(apply(subtypes[, 6:(5 + nrow(signatures))], 2, function(x) colSums(m*x)/sum(x)))
) %>%
do.call("rbind", .)
gglist <- data.frame(mu_sign) %>%
cbind(signature = rownames(mu_sign)) %>%
gather(trt, pfs, -signature) %>%
mutate(sig = signature) %>%
split(., .$signature) %>%
lapply(., function(d)
ggplot(d, aes(x = pfs, color = trt)) +
geom_line(stat = "density") +
labs(title = d$sig[1]) +
theme(legend.position = "none")
)
legend <- get_legend(gglist[[1]] + theme(legend.position = "top"))
plot_grid(plotlist = c(gglist, list(legend)), nrow = 3, ncol = 2)
})
output$dat_title <- renderUI(
h3(paste("Data up to the end of month", input$month ), align = "center")
)
output$dat_m <- renderDataTable({
dat()$dta$sim_dta
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
## R-script to analyse dipper population
## Downscales an observed dipper population series with large-scale winter
## (DJF) temperature under three emission scenarios (RCP2.6/4.5/8.5), then
## plots 90% ensemble bands against the observations.
library(esd)
library(ncdf4)
examine <- FALSE
print('read dipper data')
dipper <- read.table('~/Dropbox/data/dipper.csv',header=TRUE)
dipper <- zoo(x=dipper[[2]],order.by=dipper[[1]])
y <- as.station(dipper,loc='Lyngdalselva',
lon=7,lat=58.5,alt=520,
param='population',unit='count',
reference='Marlène Gamelon')
predictor <- retrieve("air.mon.mean.nc",lon=c(-1,17),lat=c(57,62))
## Only use the winter temperature:
predictor <- aggregate(subset(predictor,it='djf'),year,FUN='mean')
if (examine) {
print('dipper statistical distribution')
## Check the distribution
n <- seq(0,150,by=10); mu <- mean(dipper)
hist(coredata(dipper),breaks=n,freq=FALSE,col='grey')
## Bug fix: the original overlay called lines(x, dpois(x, mu)), but `x` is
## never defined at this point in the script; the Poisson curve must be
## evaluated on the bin grid `n`.
lines(n,dpois(n,mu),lwd=2,col='red')
## Single ESD for inspection
X <- EOF(predictor)
z <- DS(y,X)
plot(z)
}
## Downscale the dipper population directly based on the large-scale
## annual mean temperature
print('Downscaled results')
## Each ensemble run is expensive, so results are cached to .rda files and
## re-loaded on subsequent runs.
if (!file.exists('dipper.Z.rcp45.rda')) {
Z.rcp45 <- DSensemble.annual(y,biascorrect=TRUE,
predictor=predictor,
lon=c(-1,17),lat=c(57,62),
abscoords=TRUE)
save(file='dipper.Z.rcp45.rda',Z.rcp45)
} else load('dipper.Z.rcp45.rda')
if (!file.exists('dipper.Z.rcp85.rda')) {
Z.rcp85 <- DSensemble.annual(y,biascorrect=TRUE,
rcp="rcp85",predictor=predictor,
lon=c(-1,17),lat=c(57,62),
abscoords=TRUE)
save(file='dipper.Z.rcp85.rda',Z.rcp85)
} else load('dipper.Z.rcp85.rda')
if (!file.exists('dipper.Z.rcp26.rda')) {
Z.rcp26 <- DSensemble.annual(y,biascorrect=TRUE,
rcp="rcp26",predictor=predictor,
lon=c(-1,17),lat=c(57,62),
abscoords=TRUE)
save(file='dipper.Z.rcp26.rda',Z.rcp26)
} else load('dipper.Z.rcp26.rda')
print('ensemble statistics')
year <- year(Z.rcp45)
## 5th-95th percentile band across ensemble members, per year.
ci90.rcp45 <- apply(coredata(Z.rcp45),1,quantile,
probs=c(0.05,0.95),na.rm=TRUE)
ci90.rcp26 <- apply(coredata(Z.rcp26),1,quantile,
probs=c(0.05,0.95),na.rm=TRUE)
ci90.rcp85 <- apply(coredata(Z.rcp85),1,quantile,
probs=c(0.05,0.95),na.rm=TRUE)
print('plotting')
dev.new()
par(bty='n')
plot(range(year),range(ci90.rcp45,ci90.rcp26,ci90.rcp85,y),
type='n',xlab='',ylab='Population',main='Dipper',
sub=loc(y))
grid()
## Shaded band per scenario (orange = 8.5, green = 4.5, blue = 2.6).
polygon(c(year,rev(year)),c(ci90.rcp85[1,],rev(ci90.rcp85[2,])),
col=rgb(1,0.5,0,0.3),border=rgb(1,0.5,0))
polygon(c(year,rev(year)),c(ci90.rcp45[1,],rev(ci90.rcp45[2,])),
col=rgb(0.5,1,0,0.3),border=rgb(0.5,1,0))
polygon(c(year,rev(year)),c(ci90.rcp26[1,],rev(ci90.rcp26[2,])),
col=rgb(0,0.5,1,0.3),border=rgb(0,0.5,1))
lines(year(y),coredata(y),lwd=4,pch=19,type='b')
legend(1900,200,c('RCP2.6','RCP4.5','RCP8.5'),
col=c(rgb(0,0.5,1),rgb(0.5,1,0),rgb(1,0.5,0)),
lwd=7,lty=1,bty='n')
legend(1905,170,'observed',lwd=4,pch=19,lty=1,bty='n')
dev.copy2pdf(file='dipper.pdf')
|
/dipper/dipper.R
|
no_license
|
metno/esd_Rmarkdown
|
R
| false
| false
| 3,222
|
r
|
## R-script to analyse dipper population (duplicate copy)
## Downscales an observed dipper population series with large-scale winter
## (DJF) temperature under three emission scenarios (RCP2.6/4.5/8.5), then
## plots 90% ensemble bands against the observations.
library(esd)
library(ncdf4)
examine <- FALSE
print('read dipper data')
dipper <- read.table('~/Dropbox/data/dipper.csv',header=TRUE)
dipper <- zoo(x=dipper[[2]],order.by=dipper[[1]])
y <- as.station(dipper,loc='Lyngdalselva',
lon=7,lat=58.5,alt=520,
param='population',unit='count',
reference='Marlène Gamelon')
predictor <- retrieve("air.mon.mean.nc",lon=c(-1,17),lat=c(57,62))
## Only use the winter temperature:
predictor <- aggregate(subset(predictor,it='djf'),year,FUN='mean')
if (examine) {
print('dipper statistical distribution')
## Check the distribution
n <- seq(0,150,by=10); mu <- mean(dipper)
hist(coredata(dipper),breaks=n,freq=FALSE,col='grey')
## Bug fix: the original overlay called lines(x, dpois(x, mu)), but `x` is
## never defined at this point in the script; the Poisson curve must be
## evaluated on the bin grid `n`.
lines(n,dpois(n,mu),lwd=2,col='red')
## Single ESD for inspection
X <- EOF(predictor)
z <- DS(y,X)
plot(z)
}
## Downscale the dipper population directly based on the large-scale
## annual mean temperature
print('Downscaled results')
## Each ensemble run is expensive, so results are cached to .rda files and
## re-loaded on subsequent runs.
if (!file.exists('dipper.Z.rcp45.rda')) {
Z.rcp45 <- DSensemble.annual(y,biascorrect=TRUE,
predictor=predictor,
lon=c(-1,17),lat=c(57,62),
abscoords=TRUE)
save(file='dipper.Z.rcp45.rda',Z.rcp45)
} else load('dipper.Z.rcp45.rda')
if (!file.exists('dipper.Z.rcp85.rda')) {
Z.rcp85 <- DSensemble.annual(y,biascorrect=TRUE,
rcp="rcp85",predictor=predictor,
lon=c(-1,17),lat=c(57,62),
abscoords=TRUE)
save(file='dipper.Z.rcp85.rda',Z.rcp85)
} else load('dipper.Z.rcp85.rda')
if (!file.exists('dipper.Z.rcp26.rda')) {
Z.rcp26 <- DSensemble.annual(y,biascorrect=TRUE,
rcp="rcp26",predictor=predictor,
lon=c(-1,17),lat=c(57,62),
abscoords=TRUE)
save(file='dipper.Z.rcp26.rda',Z.rcp26)
} else load('dipper.Z.rcp26.rda')
print('ensemble statistics')
year <- year(Z.rcp45)
## 5th-95th percentile band across ensemble members, per year.
ci90.rcp45 <- apply(coredata(Z.rcp45),1,quantile,
probs=c(0.05,0.95),na.rm=TRUE)
ci90.rcp26 <- apply(coredata(Z.rcp26),1,quantile,
probs=c(0.05,0.95),na.rm=TRUE)
ci90.rcp85 <- apply(coredata(Z.rcp85),1,quantile,
probs=c(0.05,0.95),na.rm=TRUE)
print('plotting')
dev.new()
par(bty='n')
plot(range(year),range(ci90.rcp45,ci90.rcp26,ci90.rcp85,y),
type='n',xlab='',ylab='Population',main='Dipper',
sub=loc(y))
grid()
## Shaded band per scenario (orange = 8.5, green = 4.5, blue = 2.6).
polygon(c(year,rev(year)),c(ci90.rcp85[1,],rev(ci90.rcp85[2,])),
col=rgb(1,0.5,0,0.3),border=rgb(1,0.5,0))
polygon(c(year,rev(year)),c(ci90.rcp45[1,],rev(ci90.rcp45[2,])),
col=rgb(0.5,1,0,0.3),border=rgb(0.5,1,0))
polygon(c(year,rev(year)),c(ci90.rcp26[1,],rev(ci90.rcp26[2,])),
col=rgb(0,0.5,1,0.3),border=rgb(0,0.5,1))
lines(year(y),coredata(y),lwd=4,pch=19,type='b')
legend(1900,200,c('RCP2.6','RCP4.5','RCP8.5'),
col=c(rgb(0,0.5,1),rgb(0.5,1,0),rgb(1,0.5,0)),
lwd=7,lty=1,bty='n')
legend(1905,170,'observed',lwd=4,pch=19,lty=1,bty='n')
dev.copy2pdf(file='dipper.pdf')
|
# makeCacheMatrix: build a cache-aware wrapper around a matrix.
# The returned list exposes four closures sharing one environment:
#   set(y)           replace the stored matrix and drop any cached inverse
#   get()            return the stored matrix
#   setinv(inverse)  store a computed inverse in the cache
#   getinv()         return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # stored matrix changed, so the cache is stale
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
# cacheSolve: return the inverse of the matrix wrapped by `x` (an object made
# by makeCacheMatrix). On a cache hit it announces "getting cached data" and
# returns the stored inverse; otherwise it computes the inverse with solve()
# (extra arguments in ... are forwarded) and stores it via x$setinv().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
redtealongan/ProgrammingAssignment2
|
R
| false
| false
| 966
|
r
|
# makeCacheMatrix (duplicate copy): build a cache-aware wrapper around a
# matrix. The returned list exposes four closures sharing one environment:
#   set(y)           replace the stored matrix and drop any cached inverse
#   get()            return the stored matrix
#   setinv(inverse)  store a computed inverse in the cache
#   getinv()         return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # stored matrix changed, so the cache is stale
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
# cacheSolve (duplicate copy): return the inverse of the matrix wrapped by `x`
# (an object made by makeCacheMatrix). On a cache hit it announces
# "getting cached data" and returns the stored inverse; otherwise it computes
# the inverse with solve() (extra arguments in ... are forwarded) and stores
# it via x$setinv().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
\name{PLD_interface}
\alias{PLD_interface}
\title{
Model parameters and internal locations estimations
}
\description{
Function that estimates model parameters, genealogies and internal locations through Bayesian Markov chain Monte Carlo (MCMC) algorithm.
}
\usage{
PLD_interface(fileTREES, fileDATA, num_step = 1e+05, freq = 100,
burnin = 0, ess_lim = 100, sigma = NA, lambda = NA, tau = NA,
num_step_sigma = 1, num_step_lambda = 1, num_step_tau = 1,
id_filena = NA, pattern_trees_likelihood = "treeLikelihood",
names_locations = NA)
}
\arguments{
\item{fileTREES}{
a character string that specifies the name of the file containing the phylogenetic trees.
A Nexus file (e.g. BEAST output) with tips names (no space characters allowed), the phylogenetic trees and the trees likelihood.
If only one tree is contained in the file, the genealogies are not sampled in the MCMC.
}
\item{fileDATA}{
a character string that specifies the name of the file containing the tips locations : a text file with 3 columns (separated by tabs).
The first one with the tips names (the same as in \code{fileTREES}), the second one with the location latitudes and the last one with the location longitudes (both in decimal degrees).
No header.
}
\item{num_step}{
a strictly positive integer specifying the length of the Markov Chain (the number of MCMC steps).
Suggested values : 50 000 - 100 000.
}
\item{freq}{
a strictly positive integer specifying how often the sampled model parameters and the locations are saved.
Suggested values : 50 - 100.
}
\item{burnin}{
a strictly positive integer specifying the number of trees dropped from the file \code{fileTREES}.
Suggested value : 10\% of the total number of trees, 0 in case of only one tree.
}
\item{ess_lim}{
a strictly positive integer specifying the Effective Sample Size.
The MCMC stops when the ess of all parameters reach this value.
}
\item{sigma}{
a vector of two elements, strictly positive values specifying the standard deviation of the normal distribution for dispersal steps in each dimension.
Default is NA: the 2 sigma parameters are estimated in the MCMC.
Alternatively, if values are specified the sigma parameters are not estimated by the MCMC.
}
\item{lambda}{
a strictly positive value specifying the probability of migrating to an occupied location.
Default is NA: the lambda parameter is estimated in the MCMC.
Alternatively, if a value is specified the lambda parameter is not estimated by the MCMC.
A lambda value equal to 1 indicates that competitive exclusion did not impact the colonization process.
}
\item{tau}{
a strictly positive value specifying the overall dispersal rate.
Default is NA: the tau parameter is estimated in the MCMC.
Alternatively, if a value is specified the tau parameter is not estimated by the MCMC.
}
\item{num_step_sigma}{
a strictly positive integer specifying how many times the sigma parameters are sampled at each MCMC step.
}
\item{num_step_lambda}{
a strictly positive integer specifying how many times the lambda parameter is sampled at each MCMC step.
}
\item{num_step_tau}{
a strictly positive integer specifying how many times the tau parameter is sampled at each MCMC step.
}
\item{id_filena}{
an optional character string specifying the ID for the output files.
}
\item{pattern_trees_likelihood }{
a character string specifying the text pattern to retrieve the trees likelihood in \code{fileTREES}.
}
\item{names_locations}{
a vector of character strings that specifies the location names, in the same order as in \code{fileDATA}.
}
}
\details{
MCMC runs stop when the \code{num_step} is reached or when the ess of all estimated parameters equal \code{ess_lim}.
The respective ess for estimating convergence of the chains regarding each parameter is calculated by monitoring the autocorrelation between successive sampled states in the MCMC.
%Six files are written : the first one with the trees sampled (txt file), the second one with the locations sampled (log file), the third one with the model parameters sampled (log file), the fourth one with the simulation parameters (csv file), the fifth one with informations about the model parameters posterior distributions (csv file) and the sixth one with the object "phyloland" (Rdata file).
}
\value{
an object of class "phyloland" with the following components:
\item{trees}{ an object of class "multiPhylo" or "phylo" (package ape) containing the trees sampled in the MCMC.
If the \code{ess_lim} is not reached, the number of trees is \code{num_step}/\code{freq}.}
\item{locations}{ a matrix containing the internal locations sampled in the MCMC and the tips locations (columns) for each sampled tree (rows).}
\item{tips}{ a vector of character strings containing the tips names (from \code{fileTREES} and \code{fileDATA}).}
\item{space}{ a matrix containing the unique locations from \code{fileDATA}.}
\item{mcmc}{ a list containing the posterior distribution of each parameter.}
\item{sigma_limit}{ a vector indicating the threshold for sigma below which limited dispersal occurs.}
}
|
/phyloland/man/PLD_interface.Rd
|
permissive
|
LouisRanjard/Phyloland
|
R
| false
| false
| 5,110
|
rd
|
\name{PLD_interface}
\alias{PLD_interface}
\title{
Model parameters and internal locations estimations
}
\description{
Function that estimates model parameters, genealogies and internal locations through Bayesian Markov chain Monte Carlo (MCMC) algorithm.
}
\usage{
PLD_interface(fileTREES, fileDATA, num_step = 1e+05, freq = 100,
burnin = 0, ess_lim = 100, sigma = NA, lambda = NA, tau = NA,
num_step_sigma = 1, num_step_lambda = 1, num_step_tau = 1,
id_filena = NA, pattern_trees_likelihood = "treeLikelihood",
names_locations = NA)
}
\arguments{
\item{fileTREES}{
a character string that specifies the name of the file containing the phylogenetic trees.
A Nexus file (e.g. BEAST output) with tips names (no space characters allowed), the phylogenetic trees and the trees likelihood.
If only one tree is contained in the file, the genealogies are not sampled in the MCMC.
}
\item{fileDATA}{
a character string that specifies the name of the file containing the tips locations : a text file with 3 columns (separated by tabs).
The first one with the tips names (the same as in \code{fileTREES}), the second one with the location latitudes and the last one with the location longitudes (both in decimal degrees).
No header.
}
\item{num_step}{
a strictly positive integer specifying the length of the Markov Chain (the number of MCMC steps).
Suggested values : 50 000 - 100 000.
}
\item{freq}{
a strictly positive integer specifying how often the sampled model parameters and the locations are saved.
Suggested values : 50 - 100.
}
\item{burnin}{
a strictly positive integer specifying the number of trees dropped from the file \code{fileTREES}.
Suggested value : 10\% of the total number of trees, 0 in case of only one tree.
}
\item{ess_lim}{
a strictly positive integer specifying the Effective Sample Size.
The MCMC stops when the ess of all parameters reach this value.
}
\item{sigma}{
a vector of two elements, strictly positive values specifying the standard deviation of the normal distribution for dispersal steps in each dimension.
Default is NA: the 2 sigma parameters are estimated in the MCMC.
Alternatively, if values are specified the sigma parameters are not estimated by the MCMC.
}
\item{lambda}{
a strictly positive value specifying the probability of migrating to an occupied location.
Default is NA: the lambda parameter is estimated in the MCMC.
Alternatively, if a value is specified the lambda parameter is not estimated by the MCMC.
A lambda value equal to 1 indicates that competitive exclusion did not impact the colonization process.
}
\item{tau}{
a strictly positive value specifying the overall dispersal rate.
Default is NA: the tau parameter is estimated in the MCMC.
Alternatively, if a value is specified the tau parameter is not estimated by the MCMC.
}
\item{num_step_sigma}{
a strictly positive integer specifying how many times the sigma parameters are sampled at each MCMC step.
}
\item{num_step_lambda}{
a strictly positive integer specifying how many times the lambda parameter is sampled at each MCMC step.
}
\item{num_step_tau}{
a strictly positive integer specifying how many times the tau parameter is sampled at each MCMC step.
}
\item{id_filena}{
an optional character string specifying the ID for the output files.
}
\item{pattern_trees_likelihood }{
a character string specifying the text pattern to retrieve the trees likelihood in \code{fileTREES}.
}
\item{names_locations}{
a vector of character strings that specifies the location names, in the same order as in \code{fileDATA}.
}
}
\details{
MCMC runs stop when the \code{num_step} is reached or when the ess of all estimated parameters equal \code{ess_lim}.
The respective ess for estimating convergence of the chains regarding each parameter is calculated by monitoring the autocorrelation between successive sampled states in the MCMC.
%Six files are written : the first one with the trees sampled (txt file), the second one with the locations sampled (log file), the third one with the model parameters sampled (log file), the fourth one with the simulation parameters (csv file), the fifth one with informations about the model parameters posterior distributions (csv file) and the sixth one with the object "phyloland" (Rdata file).
}
\value{
an object of class "phyloland" with the following components:
\item{trees}{ an object of class "multiPhylo" or "phylo" (package ape) containing the trees sampled in the MCMC.
If the \code{ess_lim} is not reached, the number of trees is \code{num_step}/\code{freq}.}
\item{locations}{ a matrix containing the internal locations sampled in the MCMC and the tips locations (columns) for each sampled tree (rows).}
\item{tips}{ a vector of character strings containing the tips names (from \code{fileTREES} and \code{fileDATA}).}
\item{space}{ a matrix containing the unique locations from \code{fileDATA}.}
\item{mcmc}{ a list containing the posterior distribution of each parameter.}
\item{sigma_limit}{ a vector indicating the threshold for sigma below which limited dispersal occurs.}
}
|
# complete: count complete (NA-free) observation rows in a set of monitor
# files.
#
# Args:
#   directory: path to a folder of "<id>.csv" files (e.g. "1.csv", "047.csv").
#   id:        integer vector of monitor ids to summarise (default 1:332).
#
# Returns: a data.frame with one row per requested id and columns
#   id   - the monitor id
#   nobs - number of rows in that monitor's file with no missing values
complete <- function(directory, id = 1:332) {
  # List the directory and parse the file-name ids once, instead of once per
  # requested id as the original did (accidental O(n * m)).
  file_list <- list.files(directory)
  file_ids <- as.numeric(sub("\\.csv$", "", file_list))
  count_complete <- function(one_id) {
    # match() preserves the original behavior: an id with no matching file
    # yields NA and read.csv() raises an error, exactly as before.
    path <- file.path(directory, file_list[match(one_id, file_ids)])
    sum(complete.cases(read.csv(path)))
  }
  # vapply pins the result type (sapply could silently change shape).
  data.frame(id = id, nobs = vapply(id, count_complete, integer(1)))
}
|
/r_programming/wk2/complete.R
|
no_license
|
randallhelms/datasciencecoursera
|
R
| false
| false
| 681
|
r
|
# Count complete (no-NA) observations per monitor CSV file in `directory`.
# Returns a data.frame with columns `id` and `nobs`.
complete <- function(directory, id = 1:332) {
# Inner helper: number of complete rows for one monitor id.
nobs <- function (id) {
#set the path
wd = directory
#get the file List in that directory
fileList = list.files(wd)
#extract the file names and store as numeric for comparison
file.names = as.numeric(sub("\\.csv$","",fileList))
#select files to be imported based on the user input or default
# NOTE(review): an id with no matching file yields NA here and the
# read below would fail -- presumably all ids 1..332 exist on disk.
selected.files = fileList[match(id,file.names)]
#import data
Data = lapply(file.path(wd,selected.files),read.csv)
#convert into data frame
Data = do.call(rbind.data.frame,Data)
#calculate sum of rows with no missing values
sum(complete.cases(Data))}
#pass the values out
return (data.frame(id=id, nobs=sapply(id,nobs)))
}
|
## These 2 functions are used to invert a square matrix and cache the result in memory.
## If the results are in memory then they are used otherwise the matrix is inverted and
## stored in memory
## Build a matrix wrapper whose inverse can be cached.
## Returns a list of four closures sharing one environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse, or NULL if not set yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    ## A new matrix invalidates the previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  ## Expose the closures under their conventional names.
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Return the inverse of the cache-matrix `x` (built by makeCacheMatrix).
## A previously cached inverse is returned directly (with a message);
## otherwise the inverse is computed with solve(), cached, and returned.
## `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: invert the stored matrix and remember the result.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
skibugvt/ProgrammingAssignment2
|
R
| false
| false
| 1,421
|
r
|
## These 2 functions are used to invert a square matrix and cache the result in memory.
## If the results are in memory then they are used otherwise the matrix is inverted and
## stored in memory
## This function makeCacheMatrix creates a list containing a function to -
## 1) set the value of the matrix
## 2) get the value of the matrix
## 3) set the inverse of the matrix
## 4) get the inverse of the matrix
## The four closures share this function's environment, which holds both
## the matrix `x` and the cached inverse `m`.
makeCacheMatrix <- function(x = matrix()) {
# m caches the inverse; NULL means "not computed yet"
m <- NULL
#stores matrix (and invalidates any cached inverse)
set <- function(y) {
x <<- y
m <<- NULL
}
#retrives matrix
get <- function() x
# store / fetch the cached inverse
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
## builds list of accessor closures
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function checks to see the inverse is available in memory. If it is it uses it.
## if not, it calculates the inverse and caches the result.
## `x` is a cache-matrix list from makeCacheMatrix(); `...` goes to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
## Use results if they are in memory
if(!is.null(m)) {
message("getting cached data")
return(m)
}
## Else calculate the inverse and cache the results
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emr_operations.R
\name{emr_describe_step}
\alias{emr_describe_step}
\title{Provides more detail about the cluster step}
\usage{
emr_describe_step(ClusterId, StepId)
}
\arguments{
\item{ClusterId}{[required] The identifier of the cluster with steps to describe.}
\item{StepId}{[required] The identifier of the step to describe.}
}
\description{
Provides more detail about the cluster step.
See \url{https://www.paws-r-sdk.com/docs/emr_describe_step/} for full documentation.
}
\keyword{internal}
|
/cran/paws.analytics/man/emr_describe_step.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 575
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emr_operations.R
\name{emr_describe_step}
\alias{emr_describe_step}
\title{Provides more detail about the cluster step}
\usage{
emr_describe_step(ClusterId, StepId)
}
\arguments{
\item{ClusterId}{[required] The identifier of the cluster with steps to describe.}
\item{StepId}{[required] The identifier of the step to describe.}
}
\description{
Provides more detail about the cluster step.
See \url{https://www.paws-r-sdk.com/docs/emr_describe_step/} for full documentation.
}
\keyword{internal}
|
# 6.8.80 dmc spio sfortran
#
# these routines perform i/o to specpr files
#--------------------------------------------------------------
# Read one 384-word record from a direct-access specpr file.
#   lun  - logical unit of the (already opened) specpr file
#   buf  - 384-element buffer receiving the record
#   key  - record index; record key+1 is read -- presumably key is
#          zero-based, TODO confirm against the specpr file layout
#   iflg - set to 1 on a read error (left untouched on success)
subroutine readsp(lun,buf,key,iflg)
implicit integer*4 (i-n)
dimension buf(384)
# direct-access read; ier receives the i/o status (0 = success)
read(lun,rec=key+1,iostat=ier) buf
if (ier != 0) {
# report the failing unit/record/status on stdout and flag the caller
write(6,20)lun,key,ier
iflg = 1
}
return
20 format(' readsp: lun=',i4,' key=',i4,'error=',i4/)
end
|
/src-local/specpr/src.specpr/gfit/io/readsp.r
|
no_license
|
ns-bak/tetracorder-tutorial
|
R
| false
| false
| 429
|
r
|
# 6.8.80 dmc spio sfortran
#
# these routines perform i/o to specpr files
#--------------------------------------------------------------
# Read one 384-word record (record key+1) from a direct-access specpr
# file on unit lun into buf; on i/o error, print a diagnostic and set
# iflg = 1 (iflg is not modified on success).
subroutine readsp(lun,buf,key,iflg)
implicit integer*4 (i-n)
dimension buf(384)
# ier receives the i/o status (0 = success)
read(lun,rec=key+1,iostat=ier) buf
if (ier != 0) {
write(6,20)lun,key,ier
iflg = 1
}
return
20 format(' readsp: lun=',i4,' key=',i4,'error=',i4/)
end
|
# libFuzzer-derived regression input: reconstructs the exact argument list
# (a 2x2 `lims` matrix and a 1x5 `points` matrix, including NaN/denormal
# values) that triggered a valgrind report in palm's internal
# pbc_distances() C++ wrapper.
testlist <- list(lims = structure(c(NaN, 8.80011477617474e+223, 1.97322161218351e+223, 5.22851419824833e+54), .Dim = c(2L, 2L)), points = structure(c(1.49166949647652e-154, NaN, 1.22034214522788e-321, 9.94672311440879e-203, 8.80011477617474e+223 ), .Dim = c(1L, 5L)))
# Replay the recorded call against the internal (unexported) function.
result <- do.call(palm:::pbc_distances,testlist)
# Print the structure so a run under valgrind shows whether it survives.
str(result)
|
/palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612988218-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 330
|
r
|
# Duplicate of the fuzzer regression case above: replays a recorded
# crashing input against palm:::pbc_distances and prints the result.
testlist <- list(lims = structure(c(NaN, 8.80011477617474e+223, 1.97322161218351e+223, 5.22851419824833e+54), .Dim = c(2L, 2L)), points = structure(c(1.49166949647652e-154, NaN, 1.22034214522788e-321, 9.94672311440879e-203, 8.80011477617474e+223 ), .Dim = c(1L, 5L)))
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
# Read the MSI data set from the working directory.
#   outliersRemoved - TRUE reads the outlier-cleaned CSV, FALSE the original
#   batch           - 1 = batch 1 only, 2 = batch 2 only, 0 = all rows
#   centerandscale  - TRUE applies caret center/scale via applypreprocess()
# `index` is the last row belonging to batch 1 (differs between the files).
readMSI <- function(outliersRemoved = F, batch = 0, centerandscale = T)
{
if(outliersRemoved) {
index <- 110
data <- read.table("VM R1 AND R3_NEWNEW.csv",sep = ",", header = TRUE, row.names = 1)
} else {
index <- 115
data <- read.table("VM R1 AND R3_Original.csv",sep = ",", header = TRUE, row.names = 1)
}
if(centerandscale) {
data <- applypreprocess(data,c("center","scale"))
}
# Slice by batch: rows 1..index are batch 1, the remainder batch 2.
if(batch == 1){
return(data[1:index,])
} else if(batch == 2){
return(data[(index+1):nrow(data),])
} else {
return(data)
}
}
# Read the FTIR data set from the working directory.
# Same interface as readMSI(), plus `waves`: an integer range of
# wavenumbers to keep (filtered via keepwave()).
# `index` is the last row belonging to batch 1.
readFTIR <- function(outliersRemoved = F, batch = 0, waves = c(400:4000))
{
if(outliersRemoved) {
index <- 110
data <- read.table("FTIR_newnew.csv",sep = ",", header = TRUE, row.names = 1)
} else {
index <- 116
data <- read.table("FTIR_R1-R3.csv",sep = ",", header = TRUE, row.names = 1)
}
# Keep only specific waves
data <- keepwave(data, waves)
# Slice by batch: rows 1..index are batch 1, the remainder batch 2.
if(batch == 1){
return(data[1:index,])
} else if(batch == 2){
return(data[(index+1):nrow(data),])
} else {
return(data)
}
}
# Apply the paper's spectral pipeline to every data frame in `datalist`:
# baseline correction, binning (factor `bin`), normalisation on the
# 1500-1700 region (by area if `normalarea`), and optionally caret
# center/scale. Returns the list with each element transformed in order.
apply_paper_preprocess <- function(datalist, bin = 4, normalarea = F, centerandscale = T)
{
for(i in 1:length(datalist)){
datalist[[i]] <- applyspectrapreprocess(datalist[[i]], "baseline") # Baseline correction
datalist[[i]] <- applyspectrapreprocess(datalist[[i]], "bin", bin=bin) # Binning
datalist[[i]] <- applyspectrapreprocess(datalist[[i]], "normal", normalarea = normalarea) # Normalisation on peak on 1500:1700
if(centerandscale){
datalist[[i]] <- applypreprocess(datalist[[i]],c("center","scale")) # Center and scale
}
}
return(datalist)
}
# Fit glm(TVC ~ .) on `data`, plot each observation's Cook's distance,
# mark the conventional 4*mean cutoff, and label points above it.
# NOTE(review): base plot() returns NULL, so the returned `plot` value is
# NULL -- the function's real output is the drawn graphic.
cooksdistancetest <- function(data)
{
glm <- glm(TVC~., data=data)
cooksd <- cooks.distance(glm)
plot <- plot(cooksd, pch="*", cex=2, main="Influential Obs by Cooks distance") # plot cook's distance
abline(h = 4*mean(cooksd, na.rm=T), col="red") # add cutoff line
# label influential points; x=1:length(cooksd)+1 shifts labels one unit right
text(x=1:length(cooksd)+1, y=cooksd, col="red", labels=ifelse(cooksd>4*mean(cooksd, na.rm=T),names(cooksd),""))
return(plot)
}
# Plot TVC against storage time, one line per storage temperature.
# Temperature and time are parsed out of the row names: digits before
# C/B/A are the temperature, digits before "h" the time in hours --
# presumably the sample-naming convention; TODO confirm with the data.
plotTVC_time_temp <- function(data, title = "")
{
data$Temperature <- as.numeric(as.character(stringr::str_extract(rownames(data),"[0-9]*((?=C)|(?=B)|(?=A))")))
data$Time <- as.numeric(as.character(stringr::str_extract(rownames(data),"[0-9]*(?=h)")))
# Extract tvc, time and temperature to another data frame
data <- data[,c("TVC", "Time", "Temperature")]
# apply() coerces everything to character here; values are converted
# back to numeric below
data <- as.data.frame(apply(data, 2, function(x) gsub("^$|^ $", 0, x))) # Change empty temperatures to 0
data[is.na(data)] <- 0 # Change NA temperatures to 0
# Change TVC and time to numeric from factor
data$TVC <- as.numeric(as.character(data$TVC))
data$Time <- as.numeric(as.character(data$Time))
data$Temperature <- factor(data$Temperature, levels=c(15,10,5,0), ordered=T) # Change order of temperatures
data <- data[order(data$Time),] # Sort by time
plot <- ggplot(data=data, aes(x=Time, y=TVC, group=Temperature)) +
geom_line(aes(color=Temperature)) +
ggtitle(title) +
scale_color_manual(values=c("red", "black", "blue","green")) +
theme_bw(base_family = "DMOFCB+AdvGulliv-R") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
legend.text = element_text(size=15), plot.title = element_text(hjust=0.5)) +
xlab("Storage time") +
ylab(expression(paste("TVC log"[10]," cfu g"^-1)))
return(plot)
}
# Run FactoMineR::PCA on `data` (TVC column excluded), print the PCA
# summary, and return a ggbiplot of the components in `ncomp`, with
# samples grouped into low/med/high TVC tertiles.
doPca <- function(data, ncomp = c(1,2))
{
# Do PCA
pca <- PCA(data[,-which(colnames(data) %in% "TVC")], graph = F)
summary(pca)
# Create new column for TVC intervals (tertile split at 33% / 66%)
quantiles <- quantile(data$TVC, c(.33, .66)) # Find quantiles for TVC values
data$TVCinterval <- ifelse(data$TVC <= quantiles[1], 'low',
ifelse(data$TVC > quantiles[1] & data$TVC <= quantiles[2], 'med',
ifelse(data$TVC > quantiles[2], 'high', '')))
plot <- ggbiplot::ggbiplot(pca, choices = ncomp,
groups = data$TVCinterval, ellipse = TRUE, var.axes = FALSE) +
theme_bw(base_family = "DMOFCB+AdvGulliv-R") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), plot.title = element_text(hjust=0.5),
legend.text = element_text(size=15))
return(plot)
}
# Plot the 18 MSI wavelength intensities for two samples (by row name):
# `sample_one` in green, `sample_two` in red. Returns the ggplot object.
msi_compare_two_samples <- function(data, sample_one, sample_two)
{
data <- as.data.frame(t(data)) # Transpose: rows become wavelengths
data <- data[-which(rownames(data) %in% "TVC"),] # Remove TVC row
data$wave <- c(1:18) # Name the waves (index, not physical wavelength)
plot <- ggplot(data,aes(x=wave,y=data[,sample_one])) +
geom_line(col="green") +
geom_line(aes(x=wave,y=data[,sample_two]),col="red") +
theme_bw(base_family = "DMOFCB+AdvGulliv-R") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.title = element_text(hjust=0.5)) +
xlab("Mean number at certain wavelengths") +
ylab("Intensity") +
scale_x_continuous(breaks = data$wave) +
scale_fill_discrete(name="Spoilage level")
return(plot)
}
# Compare the FTIR intensity of two samples (by row name) over wavelength.
# The sample with the lower intensity at the first wavelength is drawn as
# "Fresh" (green) and the other as "Spoiled" (red) -- presumably lower
# early-spectrum intensity indicates freshness; TODO confirm.
# Returns the ggplot object.
ftir_compare_two_samples <- function(data, sample_one, sample_two)
{
data <- data[rownames(data) %in% c(sample_one, sample_two),]
data <- as.data.frame(t(data))
# Recover the numeric wavelength from the (transposed) row names.
data <- transform(data, wave=stringr::str_extract(rownames(data),"[0-9]+\\.+[0-9]*"))
data$wave <- as.numeric(as.character(data$wave)) # Set as numeric
# Sign of the difference at the first wavelength decides which sample
# gets the "Fresh" label.
difference <- (data[,1] - data[,2])[1]
# Drop the last row -- presumably the TVC entry, which has no wavelength.
data <- data[-nrow(data), ]
if(difference < 0) {
fresh <- data[, 1]
spoiled <- data[ ,2]
} else {
fresh <- data[, 2]
spoiled <- data[ ,1]
}
plot <- ggplot(data=data, aes(x=data[,3])) +
geom_line(aes(y=fresh, color="Fresh")) +
geom_line(aes(y=spoiled, color="Spoiled")) +
scale_color_manual(values=c("green","red")) +
theme_bw(base_family = "DMOFCB+AdvGulliv-R") +
labs(color="Level") +
# Bug fix: the axis labels were swapped (x is the wavelength from
# data[,3], y is the measured intensity).
xlab("Wave") +
ylab("Intensity")
return(plot)
}
# Custom partitioning function for a 70/30 train/test split.
# option: 0 = naive (caret stratified on TVC), 1 = stratified by storage
# time, 2 = stratified by temperature, 3 = Kennard-Stone algorithm.
# Returns list(train = ..., test = ...).
# NOTE(review): this shadows base::split() for the rest of the session.
split <- function(data, option)
{
set.seed(as.integer((as.double(Sys.time())*1000+Sys.getpid()) %% 2^31)) # Set seed
if(option == 0) # Naive with caret TVC
{
train_index = caret::createDataPartition(y=data$TVC, p=0.7, list=FALSE, times=1)
train <- data[train_index,]
test <- data[-train_index,]
}
else if(option == 1) # Splitted equally with time, <70 is low, 70<time<140 is med, >140 is high
{
# Storage time in hours is parsed from the row names (digits before "h").
data$Time <- as.numeric(as.character((stringr::str_extract(rownames(data),"[0-9]*(?=h)"))))
data <- data[order(data$Time), ] # Order by storage times
# Interval cutoffs taken from the 70th and 140th ordered rows.
low <- data[70, 'Time' ]
med <- data[140, 'Time']
# Create new column for time intervals
data$Timeinterval <- ifelse(data$Time <= low, 'low',
ifelse(data$Time > low & data$Time <= med, 'med',
ifelse(data$Time > med, 'high', '')))
# Find smallest dataset and assign k from that (taking equal from all intervals)
# NOTE(review): the "+ 6" offset is a magic constant -- confirm intent.
k <- min(length(which(data$Timeinterval == 'low')),
length(which(data$Timeinterval == 'med')),
length(which(data$Timeinterval == 'high'))) * 0.7 + 6
train <- data.frame()
test <- data.frame()
for(level in c('low','med','high'))
{
dataLevel <- data[data$Timeinterval == level,] # Only select current interval
p <- k / nrow(dataLevel) # Assign p for caret createDataPartition function
train_index <- caret::createDataPartition(y=dataLevel$TVC, p=p, list=FALSE, times=1)
# Append the new sample rows
train <- rbind(train, dataLevel[train_index,])
test <- rbind(test, dataLevel[-train_index,])
}
# Drop the helper Time column before returning
train <- train[,-which(colnames(train)%in%"Time")]
test <- test[,-which(colnames(test)%in%"Time")]
}
else if(option == 2) # Split equally by temperature
{
# Temperature is parsed from the row names (digits before C/B/A).
data$Temperature <- as.numeric(as.character(stringr::str_extract(rownames(data),"[0-9]*((?=C)|(?=B)|(?=A))")))
data[is.na(data)] <- 0
# Find smallest dataset and assign k from that (taking equal from all intervals)
k <- min(length(which(data$Temperature == 0)),
length(which(data$Temperature == 5)),
length(which(data$Temperature == 10)),
length(which(data$Temperature == 15))) * 0.7 + 6
train <- data.frame()
test <- data.frame()
for(temp in c(0,5,10,15))
{
dataTemperature = data[data$Temperature == temp,] # Only select current temp
p = k / nrow(dataTemperature) # Assign p for caret createDataPartition function
train_index = caret::createDataPartition(y=dataTemperature$TVC, p=p, list=FALSE, times=1)
# Append the new sample rows
train <- rbind(train, dataTemperature[train_index,])
test <- rbind(test, dataTemperature[-train_index,])
}
train <- train[,-which(colnames(train)%in%"Temperature")]
test <- test[,-which(colnames(test)%in%"Temperature")]
}
else if(option == 3) # Kennard-stone algorithm
{
ken <- kenStone(data, k=nrow(data)*0.7, metric="euclid")
train <- data[ken$model,]
test <- data[ken$test,]
}
return(list("train" = train, "test" = test))
}
# Predict `test` with a fitted caret `model`, compute RMSE and the
# within-1-log-cfu accuracy, and print a predicted-vs-actual scatter with
# the identity line (blue) and +/- 1 log bands (red).
# Returns the printed ggplot object (print() returns it invisibly).
custompredict <- function(model, test)
{
# Predict
prediction <- stats::predict(model, test)
# RMSE calc
rmse <- Metrics::rmse(test$TVC, prediction)
# Within 1 cfu accuracy calc
difference <- as.data.frame(abs(test$TVC-prediction))
accuracy <- sum(difference <= 1) / nrow(difference)
# Pair predictions with truth and sort by the actual value for plotting
x <- cbind(prediction,test$TVC)
x <- as.data.frame(x[order(x[,2]),])
# Plot
title <- paste(model$method)
colnames(x) <- c("Predicted", "Actual")
plot <- ggplot(data=x, aes(x=Actual,y=Predicted)) +
geom_point() +
ggtitle(title) +
ylim(c(0,9)) +
xlim(c(0,9)) +
geom_abline(col="blue") +
geom_abline(intercept = 1, col="red") +
geom_abline(intercept = -1, col="red") +
geom_label(label=paste("Accuracy: ", round(accuracy*100, digits = 2),"%"),
x=7, y=3, size=5, family="DMOFCB+AdvGulliv-R",label.size = 0) +
geom_label(label=paste("RMSE: ", round(rmse, digits = 4)),
x=7, y=3.5, size=5, family="DMOFCB+AdvGulliv-R", label.size = 0) +
theme_bw(base_family = "DMOFCB+AdvGulliv-R") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.title = element_text(hjust=0.5), legend.position = "none") +
xlab(expression(paste("Sample Bacterial Count log"[10]," cfu g"^-1))) +
ylab(expression(paste("Predicted Bacterial Count log"[10]," cfu g"^-1)))
print(plot)
}
# Filter FTIR columns to the wavenumbers in `list` (integer vector, e.g.
# c(700:1000, 2000:2500)); the TVC column is always kept.
# Returns the filtered data with original orientation (samples as rows).
keepwave <- function(data, list)
{
# Transpose to filter wave-lengths
data_T <- as.data.frame(t(data))
# Add waves as a column, rounded to integers for matching against `list`
# NOTE(review): data_T[,-1] drops a column but leaves rownames unchanged,
# so this is equivalent to rownames(data_T) -- confirm that was intended.
data_T$wave <- round(as.numeric(stringr::str_extract(rownames(data_T[,-1]),"[0-9]+\\.+[0-9]*")),0)
# Add temporary 0 to TVC wave so the %in% filter keeps it
data_T["TVC","wave"] <- 0
list <- c(list, 0)
# Filter wavelengths (include TVC)
x <- data_T[data_T$wave %in% list, ]
# Transpose back
x <- as.data.frame(t(x))
# Drop the last row, which is the temporary `wave` row after transposing
return(x[-nrow(x),])
}
# Build the hyper-parameter search grids used for caret cross-validation.
# Returns a named list of expand.grid() data frames, one per model method
# (pls/pcr share plsgrid).
gridList <- function()
{
plsgrid <- expand.grid(ncomp = 1:40)
svmRadialgrid <- expand.grid(sigma = 2^c(-25, -20, -15, -10, -5, 0), C = 2^c(0:5))
svmLineargrid <- expand.grid(C = 2^c(0:5))
knngrid <- expand.grid(k = 1:40)
nnetgrid <- expand.grid(size = c(1:2), decay = seq(from = 0.1, to = 1, by = 0.1))
larsgrid <- expand.grid(fraction = seq(from = 0, to = 1, by = 0.01))
ridgegrid <- expand.grid(lambda=seq(0.00001,0.0001,0.00001))
rfgrid <- expand.grid(mtry=c(2,4,6,12,24,48))
# Create a list of grids.
# Bug fix: `nnetgrid` was previously listed twice, leaving a duplicated
# name in the list; `$nnetgrid` lookups still resolve to the same grid.
gridlist <- list(plsgrid=plsgrid, svmRadialgrid=svmRadialgrid, svmLineargrid=svmLineargrid,
knngrid=knngrid, nnetgrid=nnetgrid, larsgrid=larsgrid,
rfgrid=rfgrid, ridgegrid=ridgegrid)
return(gridlist)
}
# Split samples into "fresh" (TVC <= low) and "spoiled" (TVC >= high),
# plot the mean intensity of each group over wavelength plus the absolute
# difference curve, and return a data frame with columns
# FreshSums / SpoiledSums / Wave / Difference.
meanintensitygraph <- function(data, low, high)
{
dataF <- data[data$TVC <= low,]
dataS <- data[data$TVC >= high, ]
# Fresh group: transpose, drop the TVC row, average across samples.
dataF <- as.data.frame(t(dataF))
dataF <- dataF[-1,]
dataFsums <- data.frame(matrix(ncol=2,nrow=ncol(data)-1))
dataFsums[,1] <- rowSums(dataF) / ncol(dataF)
rownames(dataFsums) <- rownames(dataF)
# Wavelength is the integer part of the row name before the decimal point.
dataFsums <- transform(dataFsums, wave=stringr::str_extract(rownames(dataF),"([0-9]*)(?=\\.)"))
dataFsums <- dataFsums[,-2]
dataFsums$wave <- as.numeric(as.character(dataFsums$wave))
# Spoiled group: same treatment.
dataS <- as.data.frame(t(dataS))
dataS <- dataS[-1,]
dataSsums <- data.frame(matrix(ncol=2,nrow=ncol(data)-1))
dataSsums[,1] <- rowSums(dataS) / ncol(dataS)
rownames(dataSsums) <- rownames(dataS)
dataSsums <- transform(dataSsums, wave=stringr::str_extract(rownames(dataS),"([0-9]*)(?=\\.)"))
dataSsums <- dataSsums[,-2]
dataSsums$wave <- as.numeric(as.character(dataSsums$wave))
# Combine the two mean curves and their absolute difference.
dataSub <- data.frame(matrix(ncol=4,nrow=ncol(data)-1))
dataSub[,1] <- dataFsums$X1
dataSub[,2] <- dataSsums$X1
dataSub[,3] <- dataFsums$wave
dataSub[,4] <- abs(dataSub[,2] - dataSub[,1])
dataSub <- dataSub[-nrow(dataSub), ]
colnames(dataSub) <- c("FreshSums", "SpoiledSums", "Wave","Difference")
# Base-graphics output: mean curves, then the difference curve.
plot(dataSub[,3], dataSub[,1], type="l", xlab="Wave", ylab="Intensity", col="green")
legend(x=1000, y=1.5, c("Fresh","Spoiled"),cex=1,col=c("green","red"),pch=c(15,15), bty = "n")
lines(dataSub[,3], dataSub[,2], col="red")
plot(dataSub[,3], abs(dataSub[,2] - dataSub[,1]), type="l", xlab="wave",ylab = "Abs intensity difference")
return(dataSub)
}
# Apply caret preProcess methods in `methodlist` (e.g. c("center","scale"))
# to all columns except TVC; TVC is detached first and re-attached after
# so it is never transformed. If a "pca" method is used, `pcaComp` sets
# the number of components to keep (0 = caret default).
applypreprocess <- function(data, methodlist, pcaComp=0)
{
TVC <- data$TVC
data <- data[, -which(colnames(data)%in%"TVC")]
if(pcaComp > 0) {
pp <- preProcess(data, method = methodlist, pcaComp = pcaComp)
} else {
pp <- preProcess(data, method = methodlist)
}
# predict() applies the fitted transformation to the data itself.
data <- predict(pp, data)
data$TVC <- TVC
return(data)
}
# Build a hyperSpec object from `data` (the TVC column is removed first).
# type = "FTIR": wavelengths parsed from column names like "X1500.5";
# any other type: column names are used directly as numeric wavelengths.
createspc <- function(data, type="FTIR") # Removes TVC
{
data <- data[,-which(colnames(data) %in% "TVC")]
if(type == "FTIR") {
waves <- as.numeric(stringr::str_extract(colnames(data),"[0-9]+\\.+[0-9]*"))
spc <- new("hyperSpec", spc=data, wavelength=waves)
}
else {
spc <- new("hyperSpec", spc=data, wavelength=as.numeric(colnames(data)))
}
return(spc)
}
# Apply one spectral pre-processing step from the hyperSpec / prospectr
# toolbox to `data` and return the transformed data frame.
#   preproc    - "baseline", "snv", "snv-d", "s-g", "deriv", "normal", "bin"
#   sgwindow   - Savitzky-Golay filtering window, deriv = derivation factor
#   bin        - binning factor, type = "FTIR" or "VM"
#   normalarea - normalise by area (TRUE) or by reference-region mean (FALSE)
# The TVC response column is detached before processing and re-attached.
applyspectrapreprocess <- function(data, preproc, sgwindow=0, deriv=1, bin=4, type="FTIR", normalarea=F)
{
TVC <- data$TVC
if(preproc == "baseline") {
# Fit a polynomial baseline below the spectrum and subtract it.
spc <- createspc(data,type)
bl <- spc.fit.poly.below(spc)
spc <- spc - bl
data <- as.data.frame(spc$spc)
data$TVC <- TVC
} else if(preproc == "snv") {
# Standard normal variate scaling per spectrum.
data <- data[,-which(colnames(data) %in% "TVC")]
data <- as.data.frame(standardNormalVariate(data))
data$TVC <- TVC
} else if(preproc == "snv-d") {
# SNV followed by detrending.
# Bug fix: `spc` was used here without ever being created, and the TVC
# column was not restored; build the hyperSpec object first.
spc <- createspc(data,type)
data <- as.data.frame(detrend(X=spc$spc, wav=attributes(spc)$wavelength))
data$TVC <- TVC
} else if(preproc == "s-g"){
# Savitzky-Golay smoothing/derivative filter.
data <- data[,-which(colnames(data) %in% "TVC")]
data <- as.data.frame(savitzkyGolay(data, m=2,p=3,w=sgwindow))
data$TVC <- TVC
} else if(preproc == "deriv"){
# Numerical derivative of order `deriv` along the wavelength axis.
spc <- createspc(data,"VM")
data <- as.data.frame(t(diff(t(spc$spc), differences=deriv)))
data$TVC <- TVC
} else if(preproc == "normal"){
# Normalise on the 1500-1700 region (area or reference mean).
spc <- createspc(data,type)
if(normalarea){
spc <- spc/ rowMeans(spc[, ,1500~1700])
} else if(type=="FTIR" & !normalarea){
factors <- 1/apply(spc[, ,1500~1700],1,mean)
spc <- sweep(spc,1,factors,"*")
} else{
spc <- sweep(spc,1,mean,"/")
}
data <- as.data.frame(spc$spc)
data$TVC <- TVC
} else if(preproc == "bin"){
# Reduce spectral resolution by averaging `bin` adjacent channels.
spc <- createspc(data,type)
spc <- spc.bin(spc,bin)
data <- as.data.frame(spc$spc)
data$TVC <- TVC
}
return(data)
}
# Predict `test` with every fitted caret model in `models` and return a
# data frame (one row per model) with RMSE, MAE, R2, within-1-log-cfu
# Accuracy (all rounded to 4 d.p.) and the model method name.
# plot = TRUE additionally prints each model's predicted-vs-actual plot
# via custompredict().
multiplemodelpredict <- function(models, test, plot=F)
{
rmseResults <- data.frame(matrix(nrow=length(models),ncol=5))
colnames(rmseResults) <- c("RMSE", "MAE","R2", "Accuracy","name")
for(i in 1:length(models))
{
model <- models[[i]]
# RMSE
prediction <- predict(model, test)
rmse <- Metrics::rmse(test$TVC, prediction)
# R2
r2 <- MLmetrics::R2_Score(test$TVC,prediction)
# MAE
mae <- Metrics::mae(test$TVC,prediction) # Calculate mae for the current model and iter
# Within 1 cfu accuracy: share of predictions within 1 log unit of truth
difference <- as.data.frame(abs(test$TVC-prediction))
accuracy <- sum(difference <= 1) / nrow(difference)
if(plot == T)
{
custompredict(model, test)
}
rmseResults[i,] <- list(rmse, mae, r2, accuracy, model$method)
}
# Use the method names as row names; keep them in `name` as well, since
# rounding below would otherwise drop the character column.
rownames(rmseResults) <- rmseResults[,5]
modelmethods <- rmseResults$name
rmseResults <- round(rmseResults[,1:4],4)
rmseResults$name <- modelmethods
return(rmseResults)
}
# Monte-Carlo cross-validation: run `iters` iterations, each with a fresh
# naive train/test split, train nine caret models, and collect per-model
# RMSE / MAE / R2 / within-1-log-cfu accuracy for every iteration.
# Returns list(rmseResults, maeResults, r2Results, accResults), each a
# data frame with models as rows and iterations as columns.
multipleiterations <- function(data, iters, parallelcores = 3)
{
x <- iters
methodlist <- c("pls", "svmLinear", "svmRadial","rf","knn", "pcr","lars", "ridge", "nnet")
rmseResults <- data.frame(matrix(nrow=length(methodlist), ncol=1)) # DF to store RMSE results
rownames(rmseResults) <- methodlist
maeResults <- data.frame(matrix(nrow=length(methodlist), ncol=1)) # DF to store MAE results
rownames(maeResults) <- methodlist
r2Results <- data.frame(matrix(nrow=length(methodlist), ncol=1)) # DF to store R2 results
rownames(r2Results) <- methodlist
accResults <- data.frame(matrix(NA, nrow=length(methodlist), ncol=1)) # DF to save acc results
rownames(accResults) <- methodlist
# NOTE(review): `max`/`min` below are never used and shadow base::max/min.
max <- 0
min <- 50
cl <- makePSOCKcluster(parallelcores) # Parallel processing on `parallelcores` workers
registerDoParallel(cl)
for(i in 1:x)
{
print(paste("Starting iteration", i ,"..."))
starttime <- Sys.time()
set.seed(as.integer((as.double(Sys.time())*1000+Sys.getpid()) %% 2^31))
# Split to train and test (NOTE: `return` here is a plain variable name)
return <- split(data, 0)
train <- return$train
test <- return$test
gridlist <- gridList()
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 3,
savePredictions = "final"
,allowParallel = T)
# Train all nine models on the same folds with their tuning grids.
models <- caretEnsemble::caretList(TVC~., data=train, trControl = fitControl, metric = "RMSE", continue_on_fail = T
,tuneList = list(
pls=caretModelSpec(method="pls", tuneGrid=gridlist$plsgrid)
,svmLinear=caretModelSpec(method="svmLinear", tuneGrid=gridlist$svmLineargrid)
,svmRadial=caretModelSpec(method="svmRadial", tuneGrid=gridlist$svmRadialgrid)
,rf=caretModelSpec(method="rf", tuneGrid=gridlist$rfgrid, ntree=500)
,knn=caretModelSpec(method="knn", tuneGrid=gridlist$knngrid)
,pcr=caretModelSpec(method="pcr", tuneGrid=gridlist$plsgrid)
,lars=caretModelSpec(method="lars", tuneGrid=gridlist$larsgrid)
,ridge=caretModelSpec(method="ridge", tuneGrid=gridlist$ridgegrid)
,nnet=caretModelSpec(method="nnet", tuneGrid=gridlist$nnetgrid, linout = TRUE, maxit=1000)
)
)
# Predictions for test set
predictionsDF <- as.data.frame(predict(models, newdata = test))
# Calculate prediction difference
predDiff <- abs(predictionsDF - test$TVC)
# Calculate rmse for each model
rmseVector <- c()
maeVector <- c()
r2Vector <- c()
for(k in 1:length(models))
{
# RMSE
rmse <- Metrics::rmse(test$TVC,predictionsDF[,k]) # Calculate rmse for the current model and iter
rmseVector <- c(rmseVector, rmse) # Add to the rmselist for all rmse on this iter
# MAE
mae <- Metrics::mae(test$TVC,predictionsDF[,k]) # Calculate mae for the current model and iter
maeVector <- c(maeVector, mae) # Add to the rmselist for all mae on this iter
# R-squared
r2 <- MLmetrics::R2_Score(test$TVC,predictionsDF[,k]) # Calculate rsquared for the current model and iter
r2Vector <- c(r2Vector, r2) # Add to the rmselist for all rsquared on this iter
# ACCURACY
# Check how many predictions are within 1 cfu/log; table[2] counts TRUE
table <- table(predDiff[,k] < 1)
if(is.na(table[2])){
# All predictions within 1 log unit -> accuracy 1
pred <- 1
} else {
pred <- as.numeric(table[2]) / as.numeric(table[1] + table[2])
}
accResults[k,i] <- pred # Store current model+iter accuracy on the DF
}
# Add RMSEs together for this iteration
rmseResults <- cbind(rmseResults,rmseVector)
# Add MAEs together for this iteration
maeResults <- cbind(maeResults,maeVector)
# Add R-squareds together for this iteration
r2Results <- cbind(r2Results,r2Vector)
print(paste("This iteration took:", round(Sys.time() - starttime,3)))
}
stopCluster(cl) # Stop parallel processing
registerDoSEQ()
# Drop the all-NA placeholder column each result DF was created with.
rmseResults <- rmseResults[,-1, drop=F]
maeResults <- maeResults[,-1, drop=F]
r2Results <- r2Results[,-1, drop=F]
colnames(rmseResults) <- 1:x
colnames(maeResults) <- 1:x
colnames(r2Results) <- 1:x
colnames(accResults) <- 1:x
rmseResults <- round(rmseResults,4)
maeResults <- round(maeResults,4)
r2Results <- round(r2Results,4)
accResults <- round(accResults,4)
returnlist <- list(rmseResults=rmseResults, maeResults=maeResults,
r2Results=r2Results, accResults=accResults)
return(returnlist)
}
# Collapse per-iteration performance tables into one mean summary.
#
# Each argument is a data.frame with one row per model and one column per
# Monte-Carlo iteration (as produced by multipleiterations()).
# Returns a data.frame with rows = models and columns RMSE / MAE / R2 /
# Accuracy (the row means, rounded to 4 d.p.) plus a `name` column
# repeating the model row names.
getfinalresults <- function(rmseResults, maeResults, r2Results, accResults)
{
  finalresults <- data.frame(matrix(nrow = nrow(rmseResults), ncol = 4))
  rownames(finalresults) <- rownames(accResults)
  colnames(finalresults) <- c("RMSE", "MAE", "R2", "Accuracy")
  # Average each metric across iterations (columns), one value per model.
  finalresults[, 1] <- round(rowMeans(rmseResults), 4)
  finalresults[, 2] <- round(rowMeans(maeResults), 4)
  finalresults[, 3] <- round(rowMeans(r2Results), 4)
  finalresults[, 4] <- round(rowMeans(accResults), 4)
  finalresults$name <- rownames(finalresults)
  finalresults
}
# Run `iters` training iterations of one model ("pls" or "nnet" only) and
# average caret variable importance across iterations.
# Returns list(iterresult = per-iteration importances with a `mean`
# column, meanimp = mean importance sorted descending with the integer
# wavelength parsed from the variable names).
variableimportancetest <- function(data, iters, model, parallelcores = 3)
{
cl <- makePSOCKcluster(parallelcores) # Set parallel processing
registerDoParallel(cl)
# Placeholder first column; one importance column is appended per iteration.
result <- data.frame(matrix(ncol=1,nrow=ncol(data)-1))
for(i in 1:iters)
{
print(paste("Iteration",i))
starttime <- Sys.time()
# Split to train and test
return <- split(data, 0)
train <- return$train
test <- return$test
set.seed(as.integer((as.double(Sys.time())*1000+Sys.getpid()) %% 2^31))
# Set grids
nnetgrid <- expand.grid(size = c(1,2), decay = seq(0.1, 1, 0.1))
plsgrid <- expand.grid(ncomp=seq(1,40,1))
# Set CV method
fitControl <- trainControl(method = "repeatedcv", number = 5, repeats=3
,savePredictions = "final"
,allowParallel = T)
# Run the model
# NOTE(review): both branches train on the full `data`, not `train`,
# even though a split was just made -- confirm this is intentional.
if(model == "pls"){
models <- caretEnsemble::caretList(TVC~., data=data, trControl = fitControl, metric = "RMSE"
,tuneList = list(
pls=caretModelSpec(method="pls", tuneGrid=plsgrid)
)
)
} else if(model == "nnet"){
models <- caretEnsemble::caretList(TVC~., data=data, trControl = fitControl, metric = "RMSE"
,tuneList = list(
nnet=caretModelSpec(method="nnet", tuneGrid=nnetgrid,
linout = TRUE, maxit=1000, MaxNWts=7000)
)
)
}
# Get variable importance for this iteration's fitted model
imp <- as.data.frame(varImp(models[[1]])$importance)
result <- cbind(result,imp)
print(paste("Iteration took", Sys.time()-starttime))
}
stopCluster(cl)
registerDoSEQ()
# Drop the all-NA placeholder column
result <- result[,-1]
# Put mean as extra col
result$mean <- apply(result,1,FUN=mean)
# Extract mean importance corresponding to integer waves
impmean <- result[,"mean",drop=F]
impmean <- transform(impmean, wave=as.numeric(stringr::str_extract(rownames(impmean),"[0-9]+\\.+[0-9]*")))
impmean <- impmean[order(-impmean$mean), ]
impmean$wave <- round(impmean$wave,0)
return(list(iterresult = result, meanimp = impmean))
}
# Plot a heatmap comparing model performance across several result sets.
#   resultlist   - list of data frames from getfinalresults() (one per batch)
#   names        - label for each result set (shown on the x axis)
#   excludeModel - model names to drop from the plot
#   includeModel - if non-empty, keep only these model names
#   limits       - accuracy colour-scale limits (out-of-range values squished)
# Prints and returns the ggplot object.
finalheatmap <- function(resultlist, names, excludeModel = c(), includeModel = c(), limits=c(50,90))
{
# Pull one metric column out of every result set and tag each row with
# its model name and batch label so the sets can be stacked long-format.
getperformance <- function(resultlist, names, perfname)
{
for(i in 1:length(resultlist))
{
resultlist[[i]] <- resultlist[[i]][,perfname, drop=F]
resultlist[[i]]$model <- rownames(resultlist[[i]])
resultlist[[i]]$batch <- rep(names[i],nrow(resultlist[[i]]))
}
return(resultlist)
}
rmselist <- getperformance(resultlist, names, "RMSE")
maelist <- getperformance(resultlist, names, "MAE")
rsquaredlist <- getperformance(resultlist, names, "R2")
accuracylist <- getperformance(resultlist, names, "Accuracy")
forheatmapRMSE <- ldply(rmselist, data.frame)
forheatmapMAE <- ldply(maelist, data.frame)
forheatmapR2 <- ldply(rsquaredlist, data.frame)
forheatmapAcc <- ldply(accuracylist, data.frame)
forheatmapAcc$Accuracy <- forheatmapAcc$Accuracy * 100
# Drop excluded models. (The original looped once per excluded element,
# re-filtering the whole vector each pass; one pass is equivalent.)
if(length(excludeModel) > 0)
{
forheatmapRMSE <- forheatmapRMSE[which(!forheatmapRMSE$model %in% excludeModel),]
forheatmapMAE <- forheatmapMAE[which(!forheatmapMAE$model %in% excludeModel),]
forheatmapR2 <- forheatmapR2[which(!forheatmapR2$model %in% excludeModel),]
forheatmapAcc <- forheatmapAcc[which(!forheatmapAcc$model %in% excludeModel),]
}
# Keep only the requested models.
# Bug fix: R2 and Accuracy were previously filtered by `excludeModel`
# and MAE was not filtered at all, so the four geom_text layers could
# disagree about which models appear in the heatmap.
if(length(includeModel) > 0)
{
forheatmapRMSE <- forheatmapRMSE[which(forheatmapRMSE$model %in% includeModel),]
forheatmapMAE <- forheatmapMAE[which(forheatmapMAE$model %in% includeModel),]
forheatmapR2 <- forheatmapR2[which(forheatmapR2$model %in% includeModel),]
forheatmapAcc <- forheatmapAcc[which(forheatmapAcc$model %in% includeModel),]
}
forheatmapAcc <- forheatmapAcc[order(-forheatmapAcc$Accuracy),]
plot <- ggplot(forheatmapAcc, aes(batch, reorder(model,Accuracy))) +
geom_tile(aes(fill=Accuracy)) +
scale_fill_gradientn(colors=colorRampPalette(c("tomato2","green"))(3), limits=limits, oob=squish) +
theme_bw(base_size = 20) +
labs(x="", y="") +
scale_x_discrete(expand=c(0,0)) +
scale_y_discrete(expand=c(0,0)) +
geom_text(data=forheatmapAcc,aes(batch,model, label=paste("Acc:",Accuracy,"%")),
vjust=-1, hjust=1, size=4.5, family="DMOFCB+AdvGulliv-R") +
geom_text(data=forheatmapRMSE,aes(batch,model,label=paste("RMSE:",RMSE)),
vjust=2, hjust=1, size=4.5, family="DMOFCB+AdvGulliv-R") +
geom_text(data=forheatmapMAE,aes(batch,model,label=paste("MAE:",MAE)),
vjust=-1, hjust=-0.2, size=4.5, family="DMOFCB+AdvGulliv-R") +
geom_text(data=forheatmapR2,aes(batch,model,label=paste("R2:",R2)),
vjust=2, hjust=-0.2, size=4.5, family="DMOFCB+AdvGulliv-R") +
theme(legend.position = "none", text=element_text(family="DMOFCB+AdvGulliv-R"))
print(plot)
return(plot)
}
#----(NOT USED IN PAPER)----
# Plots the importance of variables over wavelengths
# Extracts caret variable importance from a fitted model, parses the
# wavelength out of each predictor name, and scatters importance vs wave.
plotvariableimportance <- function(model)
{
  importance_df <- as.data.frame(varImp(model)$importance)
  importance_df$wave <- as.numeric(stringr::str_extract(rownames(importance_df), "[0-9]+\\.+[0-9]*"))
  importance_df <- importance_df[order(importance_df$wave), ]
  ggplot(data=importance_df, aes(x=wave, y=Overall)) +
    geom_point()
}
# Filter waves according to RF variable importance, returns filtered data
# topx = Return FTIR data with topx waves. threshold = Return FTIR data with filtering values under threshold
# if topx and threshold is 0, returns the importance list
findandkeepwavesRF <- function(data, topx=0, threshold=0) # no topx argument returns the list
{
# Worker count is hard-coded to 3 for the parallel backend
cl <- makePSOCKcluster(3)
registerDoParallel(cl)
# Find optimal mtry value (RF parameter)
# NOTE(review): assumes TVC is the first column of `data` — confirm with callers
tune <- tuneRF(data[,-1], data[,1], trace = FALSE, plot = FALSE)
mtry <- as.numeric(row.names(tune)[(which(tune[,2]==min(tune[,2])))])
# Train model
model <- randomForest(TVC~., data=data, mtry = mtry, ntree=1000, importance=T)
# Importance row names embed the wavenumber before the decimal point
importance <- as.data.frame(model$importanceSD)
importance <- transform(importance, wave=stringr::str_extract(rownames(importance),"([0-9]*)(?=\\.)"))
importance$wave <- as.numeric(as.character(importance$wave))
stopCluster(cl)
registerDoSEQ()
# Three return modes: top-x waves, waves above threshold, or the raw table
if(topx != 0) {
keeplist <- top_n(importance, topx, importance$model.importanceSD)$wave
} else if(threshold !=0){
keeplist <- importance[importance$model.importanceSD > threshold,]$wave
} else {
return(importance)
}
data <- keepwave(data, keeplist)
return(data)
}
# Plots std of wavelengths
# Transposes so each row is a wavelength, drops the TVC response row, and
# plots the per-wavelength standard deviation across samples.
# Returns a data frame with the SD and wave columns.
plotSTD <- function(data)
{
  transposed <- as.data.frame(t(data))
  transposed <- transposed[-which(rownames(transposed) %in% "TVC"), ]
  transposed$SD <- apply(transposed, 1, sd)
  transposed$waves <- as.numeric(stringr::str_extract(rownames(transposed), "[0-9]+\\.+[0-9]*"))
  std_plot <- ggplot(data=transposed, aes(x=waves, y=SD)) +
    geom_line() +
    xlab("Wave length") +
    ylab("Std") +
    ggtitle("Standard deviation of intensity over wave lengths") +
    scale_x_continuous("Wave", breaks=seq(500,4000,500)) +
    theme_classic()
  print(std_plot)
  return(transposed[, c("SD","waves")])
}
# Filters the data according to the std values
# std = std data frame, threshold = lowest std allowed
# Keeps only the wavelength columns whose SD exceeds the threshold;
# the TVC response column is always retained.
filterwavesfromSTD <- function(data, std, threshold)
{
  keep <- rownames(std[which(std$SD > threshold), ])
  keep <- c(keep, 'TVC')
  return(data[, colnames(data) %in% keep])
}
# Calculates and plots the linear model coefficients of a data
# Fits TVC ~ all wavelengths, parses a wavenumber out of each coefficient
# name, drops the intercept and NA coefficients, and plots coefficient vs wave.
lmcoefficient <- function(data)
{
model <- lm(TVC~., data=data)
coef <- as.data.frame(model$coefficients)
# Coefficient names embed the wavenumber before the decimal point
coef <- transform(coef, wave=as.numeric(stringr::str_extract(rownames(coef),"([0-9]*)(?=\\.)")))
coef <- coef[-1,] # drop the (Intercept) row
coef <- na.omit(coef) # lm() yields NA coefficients for aliased predictors
plot <- ggplot(data=coef, aes(x=wave, y=model.coefficients)) +
geom_line()
print(plot)
return(coef)
}
# Filters waves in FTIR by contribution to PCA
# contribution = contribution data frame calculated elsewhere, threshold = values under threshold will be filtered
# Returns `data` restricted (via keepwave) to the wavelengths whose summed
# PCA contribution (contribution$sum) exceeds the threshold.
filterwavesbyPCAcontrib <- function(data, contribution, threshold)
{
filterlist <- rownames(contribution[contribution$sum > threshold,]) # Filter contribs below threshold
filterlist <- str_extract(filterlist, "[0-9]*(?=\\.)") # Convert row names to wave numbers
data <- keepwave(data, filterlist) # Filter the data
return(data)
}
# Tries to find patterns of train/test split from a list of RMSE results for multiple models
# For every sample, counts how often it appeared in the training sets of the
# supplied caret models and reports the count plus the fraction of models.
# NOTE(review): `data` below is a free variable (not a parameter) — this
# relies on a global data frame holding the full sample set; confirm in scope.
findpatternbestmodels <- function(RMSEbestlist)
{
# Collect the training-set row names of every model side by side
RMSEbesttrain <- cbind(rownames(RMSEbestlist[[1]]$trainingData), rownames(RMSEbestlist[[2]]$trainingData))
for(i in 3:length(RMSEbestlist))
{
RMSEbesttrain <- cbind(RMSEbesttrain, rownames(RMSEbestlist[[i]]$trainingData))
}
countbest <- data.frame(matrix(ncol=2)) # create DF to store count for each sample appeared in training sets
for(rowname in rownames(data)) # Check each sample
{
countbest <- rbind(countbest, length(which(RMSEbesttrain == rowname)))
}
countbest <- countbest[-1,] # remove NA row
rownames(countbest) <- rownames(data) # set row names as sample names
countbest <- countbest[,-2, drop=F] # drop unnecessary
colnames(countbest)[1] <- "Count"
countbest$pct <- round(countbest$Count / length(RMSEbestlist), 2) # What percent did the sample got used in all iters
return(countbest)
}
|
/Util.R
|
no_license
|
ozcanonur/Chicken_ML
|
R
| false
| false
| 35,295
|
r
|
# Reads the MSI file
# outliers = read file with outliers removed or not, batch = batch 1, 2 or 0 (all data)
# centerandscale = preprocess centerscale
readMSI <- function(outliersRemoved = F, batch = 0, centerandscale = T)
{
  # The two CSV variants split batch 1 / batch 2 at different row indices.
  if(outliersRemoved) {
    index <- 110
    data <- read.table("VM R1 AND R3_NEWNEW.csv",sep = ",", header = TRUE, row.names = 1)
  } else {
    index <- 115
    data <- read.table("VM R1 AND R3_Original.csv",sep = ",", header = TRUE, row.names = 1)
  }
  if(centerandscale) {
    data <- applypreprocess(data,c("center","scale"))
  }
  # batch 1 = rows up to index, batch 2 = the remainder, anything else = all
  if(batch == 1){
    return(data[1:index,])
  }
  if(batch == 2){
    return(data[(index+1):nrow(data),])
  }
  return(data)
}
# Same as MSI, except you can keep specific waves with waves parameter
readFTIR <- function(outliersRemoved = F, batch = 0, waves = c(400:4000))
{
  # The two CSV variants split batch 1 / batch 2 at different row indices.
  if(outliersRemoved) {
    index <- 110
    data <- read.table("FTIR_newnew.csv",sep = ",", header = TRUE, row.names = 1)
  } else {
    index <- 116
    data <- read.table("FTIR_R1-R3.csv",sep = ",", header = TRUE, row.names = 1)
  }
  # Keep only specific waves
  data <- keepwave(data, waves)
  # batch 1 = rows up to index, batch 2 = the remainder, anything else = all
  if(batch == 1){
    return(data[1:index,])
  }
  if(batch == 2){
    return(data[(index+1):nrow(data),])
  }
  return(data)
}
# Applies baseline correction, binning, normalisation and center and scale to a given data
# datalist = list of spectra data frames; each element is processed in turn
# and the transformed list is returned.
apply_paper_preprocess <- function(datalist, bin = 4, normalarea = F, centerandscale = T)
{
  for(idx in seq_along(datalist)){
    current <- datalist[[idx]]
    current <- applyspectrapreprocess(current, "baseline") # Baseline correction
    current <- applyspectrapreprocess(current, "bin", bin=bin) # Binning
    current <- applyspectrapreprocess(current, "normal", normalarea = normalarea) # Normalisation on peak on 1500:1700
    if(centerandscale){
      current <- applypreprocess(current,c("center","scale")) # Center and scale
    }
    datalist[[idx]] <- current
  }
  return(datalist)
}
# Performs the cooks distance test and plots it on a data frame
# Fits TVC ~ all predictors with glm, plots each observation's Cook's
# distance, draws the conventional 4*mean cutoff line, and labels the
# observations above it with their sample names.
# NOTE(review): base plot() returns NULL, so the returned value is NULL —
# the function's real output is the drawn plot (a side effect).
cooksdistancetest <- function(data)
{
glm <- glm(TVC~., data=data)
cooksd <- cooks.distance(glm)
plot <- plot(cooksd, pch="*", cex=2, main="Influential Obs by Cooks distance") # plot cook's distance
abline(h = 4*mean(cooksd, na.rm=T), col="red") # add cutoff line
text(x=1:length(cooksd)+1, y=cooksd, col="red", labels=ifelse(cooksd>4*mean(cooksd, na.rm=T),names(cooksd),""))
return(plot)
}
# Plots time vs temperature grouped by temperature for a given data
# Sample row names encode storage temperature (digits before A/B/C) and
# storage time (digits before "h"); both are parsed out with regex lookaheads
# and TVC is drawn over time with one line per temperature.
plotTVC_time_temp <- function(data, title = "")
{
data$Temperature <- as.numeric(as.character(stringr::str_extract(rownames(data),"[0-9]*((?=C)|(?=B)|(?=A))")))
data$Time <- as.numeric(as.character(stringr::str_extract(rownames(data),"[0-9]*(?=h)")))
# Extract tvc, time and temperature to another data frame
data <- data[,c("TVC", "Time", "Temperature")]
# apply() coerces everything to character here, hence the back-conversion below
data <- as.data.frame(apply(data, 2, function(x) gsub("^$|^ $", 0, x))) # Change empty temperatures to 0
data[is.na(data)] <- 0 # Change NA temperatures to 0
# Change TVC and time to numeric from factor
data$TVC <- as.numeric(as.character(data$TVC))
data$Time <- as.numeric(as.character(data$Time))
data$Temperature <- factor(data$Temperature, levels=c(15,10,5,0), ordered=T) # Change order of temperatures
data <- data[order(data$Time),] # Sort by time
plot <- ggplot(data=data, aes(x=Time, y=TVC, group=Temperature)) +
geom_line(aes(color=Temperature)) +
ggtitle(title) +
scale_color_manual(values=c("red", "black", "blue","green")) +
theme_bw(base_family = "DMOFCB+AdvGulliv-R") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
legend.text = element_text(size=15), plot.title = element_text(hjust=0.5)) +
xlab("Storage time") +
ylab(expression(paste("TVC log"[10]," cfu g"^-1)))
return(plot)
}
# Does PCA on given data, prints summary and plots selected ncomps
# ncomp = the two principal components to display, e.g. c(1,2)
# Returns a ggbiplot with samples grouped into low/med/high TVC tertiles.
doPca <- function(data, ncomp = c(1,2))
{
# Do PCA (response column excluded)
pca <- PCA(data[,-which(colnames(data) %in% "TVC")], graph = F)
summary(pca)
# Create new column for TVC intervals
quantiles <- quantile(data$TVC, c(.33, .66)) # Find quantiles for TVC values
# Bucket samples into low/med/high spoilage level by the TVC tertiles
data$TVCinterval <- ifelse(data$TVC <= quantiles[1], 'low',
ifelse(data$TVC > quantiles[1] & data$TVC <= quantiles[2], 'med',
ifelse(data$TVC > quantiles[2], 'high', '')))
plot <- ggbiplot::ggbiplot(pca, choices = ncomp,
groups = data$TVCinterval, ellipse = TRUE, var.axes = FALSE) +
theme_bw(base_family = "DMOFCB+AdvGulliv-R") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), plot.title = element_text(hjust=0.5),
legend.text = element_text(size=15)) 
return(plot)
}
# Plots intensities of two samples, first sample is colored green, second red
# sample_one / sample_two = sample row names of `data` (columns after the
# transpose below).
# NOTE(review): assumes exactly 18 MSI wavelength rows remain after removing
# TVC — confirm against the MSI data layout.
msi_compare_two_samples <- function(data, sample_one, sample_two)
{
data <- as.data.frame(t(data)) # Transpose
data <- data[-which(rownames(data) %in% "TVC"),] # Remove TVC row
data$wave <- c(1:18) # Name the waves
plot <- ggplot(data,aes(x=wave,y=data[,sample_one])) +
geom_line(col="green") +
geom_line(aes(x=wave,y=data[,sample_two]),col="red") +
theme_bw(base_family = "DMOFCB+AdvGulliv-R") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.title = element_text(hjust=0.5)) +
xlab("Mean number at certain wavelengths") +
ylab("Intensity") +
scale_x_continuous(breaks = data$wave) +
scale_fill_discrete(name="Spoilage level")
return(plot)
}
# Compares intensities of two samples over wavelengths
# sample_one / sample_two = row names of `data`; the lower-intensity sample is
# plotted as "Fresh" (green), the other as "Spoiled" (red).
ftir_compare_two_samples <- function(data, sample_one, sample_two)
{
  # Keep just the two requested samples and transpose so rows = wavelengths
  data <- data[rownames(data) %in% c(sample_one, sample_two),]
  data <- as.data.frame(t(data))
  data <- transform(data, wave=stringr::str_extract(rownames(data),"[0-9]+\\.+[0-9]*"))
  data$wave <- as.numeric(as.character(data$wave)) # Set as numeric
  # Decide which column is the fresher sample from the first row's intensity
  # difference. NOTE(review): assumes the first row's sign separates fresh
  # from spoiled — confirm against the data layout.
  difference <- (data[,1] - data[,2])[1]
  # Drop the last row (presumably the transposed TVC response — verify)
  data <- data[-nrow(data), ]
  if(difference < 0) {
    fresh <- data[, 1]
    spoiled <- data[ ,2]
  } else {
    fresh <- data[, 2]
    spoiled <- data[ ,1]
  }
  # BUG FIX: the axis labels were swapped in the original — the x aesthetic
  # is the wave column (data[,3]) and y is the intensity, but the labels
  # said the opposite.
  plot <- ggplot(data=data, aes(x=data[,3])) +
    geom_line(aes(y=fresh, color="Fresh")) +
    geom_line(aes(y=spoiled, color="Spoiled")) +
    scale_color_manual(values=c("green","red")) +
    theme_bw(base_family = "DMOFCB+AdvGulliv-R") +
    labs(color="Level") +
    xlab("Wave") +
    ylab("Intensity")
  return(plot)
}
# Custom partitioning function for train/test split, option 0=naive, 1=time, 2=temperature, 3=Kennard-stone
# Returns list(train=..., test=...). Roughly 70% of rows go to train.
# NOTE(review): this function masks base::split for the rest of the session.
split <- function(data, option)
{
# Time-and-pid based seed so repeated calls give different partitions
set.seed(as.integer((as.double(Sys.time())*1000+Sys.getpid()) %% 2^31)) # Set seed
if(option == 0) # Naive with caret TVC
{
train_index = caret::createDataPartition(y=data$TVC, p=0.7, list=FALSE, times=1)
train <- data[train_index,]
test <- data[-train_index,]
}
else if(option == 1) # Splitted equally with time: cut points taken at the 70th and 140th time-ordered rows
{
# Storage time parsed from row names (digits before "h")
data$Time <- as.numeric(as.character((stringr::str_extract(rownames(data),"[0-9]*(?=h)"))))
data <- data[order(data$Time), ] # Order by storage times
low <- data[70, 'Time' ]
med <- data[140, 'Time']
# Create new column for time intervals
data$Timeinterval <- ifelse(data$Time <= low, 'low',
ifelse(data$Time > low & data$Time <= med, 'med',
ifelse(data$Time > med, 'high', '')))
# Find smallest dataset and assign k from that (taking equal from all intervals)
k <- min(length(which(data$Timeinterval == 'low')),
length(which(data$Timeinterval == 'med')),
length(which(data$Timeinterval == 'high'))) * 0.7 + 6
train <- data.frame()
test <- data.frame()
# Sample the same number (k) of training rows from each interval
for(level in c('low','med','high'))
{
dataLevel <- data[data$Timeinterval == level,] # Only select current interval
p <- k / nrow(dataLevel) # Assign p for caret createDataPartition function
train_index <- caret::createDataPartition(y=dataLevel$TVC, p=p, list=FALSE, times=1)
# Append the new sample rows
train <- rbind(train, dataLevel[train_index,])
test <- rbind(test, dataLevel[-train_index,])
}
train <- train[,-which(colnames(train)%in%"Time")]
test <- test[,-which(colnames(test)%in%"Time")]
}
else if(option == 2) # Split equally by temperature
{
# Storage temperature parsed from row names (digits before A/B/C)
data$Temperature <- as.numeric(as.character(stringr::str_extract(rownames(data),"[0-9]*((?=C)|(?=B)|(?=A))")))
data[is.na(data)] <- 0
# Find smallest dataset and assign k from that (taking equal from all intervals)
k <- min(length(which(data$Temperature == 0)),
length(which(data$Temperature == 5)),
length(which(data$Temperature == 10)),
length(which(data$Temperature == 15))) * 0.7 + 6
train <- data.frame()
test <- data.frame()
# Sample the same number (k) of training rows from each temperature group
for(temp in c(0,5,10,15))
{
dataTemperature = data[data$Temperature == temp,] # Only select current temp
p = k / nrow(dataTemperature) # Assign p for caret createDataPartition function
train_index = caret::createDataPartition(y=dataTemperature$TVC, p=p, list=FALSE, times=1)
# Append the new sample rows
train <- rbind(train, dataTemperature[train_index,])
test <- rbind(test, dataTemperature[-train_index,])
}
train <- train[,-which(colnames(train)%in%"Temperature")]
test <- test[,-which(colnames(test)%in%"Temperature")]
}
else if(option == 3) # Kennard-stone algorithm
{
ken <- kenStone(data, k=nrow(data)*0.7, metric="euclid")
train <- data[ken$model,]
test <- data[ken$test,]
}
return(list("train" = train, "test" = test))
}
# Custom predict function which prints a prediction-vs-actual plot and displays RMSE/ACC for a single model
# model = fitted caret model, test = test data with a TVC column.
# NOTE(review): the last expression is print(plot); whether the plot is
# returned (invisibly) depends on ggplot2's print method — confirm if callers
# rely on the return value.
custompredict <- function(model, test)
{
# Predict
prediction <- stats::predict(model, test)
# RMSE calc
rmse <- Metrics::rmse(test$TVC, prediction)
# Within 1 cfu accuracy calc
difference <- as.data.frame(abs(test$TVC-prediction))
accuracy <- sum(difference <= 1) / nrow(difference)
# Pair predictions with actuals, ordered by the actual TVC
x <- cbind(prediction,test$TVC)
x <- as.data.frame(x[order(x[,2]),])
# Plot: identity line in blue, +-1 log cfu tolerance band in red
title <- paste(model$method)
colnames(x) <- c("Predicted", "Actual")
plot <- ggplot(data=x, aes(x=Actual,y=Predicted)) +
geom_point() +
ggtitle(title) +
ylim(c(0,9)) +
xlim(c(0,9)) +
geom_abline(col="blue") +
geom_abline(intercept = 1, col="red") +
geom_abline(intercept = -1, col="red") +
geom_label(label=paste("Accuracy: ", round(accuracy*100, digits = 2),"%"),
x=7, y=3, size=5, family="DMOFCB+AdvGulliv-R",label.size = 0) +
geom_label(label=paste("RMSE: ", round(rmse, digits = 4)),
x=7, y=3.5, size=5, family="DMOFCB+AdvGulliv-R", label.size = 0) +
theme_bw(base_family = "DMOFCB+AdvGulliv-R") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
plot.title = element_text(hjust=0.5), legend.position = "none") +
xlab(expression(paste("Sample Bacterial Count log"[10]," cfu g"^-1))) +
ylab(expression(paste("Predicted Bacterial Count log"[10]," cfu g"^-1)))
print(plot)
}
# Filter wavelengths from FTIR, list = waves to keep as integer ranges (i.e. 700:1000, 2000:2500)
# Works by transposing, tagging each row with its rounded integer wavenumber,
# filtering, and transposing back. TVC is given the sentinel wave 0 so it is
# always kept. NOTE(review): the parameter `list` shadows base::list inside
# this function body.
keepwave <- function(data, list)
{
# Transpose to filter wave-lengths
data_T <- as.data.frame(t(data))
# Add waves as a column (rounded to whole wavenumbers)
data_T$wave <- round(as.numeric(stringr::str_extract(rownames(data_T[,-1]),"[0-9]+\\.+[0-9]*")),0)
# Add temporary 0 to TVC wave
data_T["TVC","wave"] <- 0
list <- c(list, 0)
# Filter wavelengths (include TVC)
x <- data_T[data_T$wave %in% list, ]
# Transpose back
x <- as.data.frame(t(x))
# Drop the last row: the helper "wave" column became a row after transposing
return(x[-nrow(x),])
}
# Set up a grid list for CV
# Returns a named list of hyper-parameter search grids, one per caret
# model method used by multipleiterations().
gridList <- function()
{
  plsgrid <- expand.grid(ncomp = 1:40)
  svmRadialgrid <- expand.grid(sigma = 2^c(-25, -20, -15, -10, -5, 0), C = 2^c(0:5))
  svmLineargrid <- expand.grid(C = 2^c(0:5))
  knngrid <- expand.grid(k = 1:40)
  nnetgrid <- expand.grid(size = c(1:2), decay = seq(from = 0.1, to = 1, by = 0.1))
  larsgrid <- expand.grid(fraction = seq(from = 0, to = 1, by = 0.01))
  ridgegrid <- expand.grid(lambda=seq(0.00001,0.0001,0.00001))
  rfgrid <- expand.grid(mtry=c(2,4,6,12,24,48))
  # BUG FIX: the original listed nnetgrid twice; the duplicated name was
  # redundant (lookups by $nnetgrid always resolved to the first entry).
  gridlist <- list(plsgrid=plsgrid, svmRadialgrid=svmRadialgrid, svmLineargrid=svmLineargrid,
                   knngrid=knngrid, nnetgrid=nnetgrid, larsgrid=larsgrid,
                   rfgrid=rfgrid, ridgegrid=ridgegrid)
  return(gridlist)
}
# Splits the data depending on low and high parameters, plots the mean intensities of the split data over wavelengths
# data rows = samples with a TVC column; samples with TVC <= low are treated
# as "fresh" (F), TVC >= high as "spoiled" (S). Two base plots are drawn:
# mean intensity per group, and the absolute difference between groups.
# Returns the per-wavelength summary data frame.
# NOTE(review): dataF <- dataF[-1,] assumes TVC is the first row after
# transposing (i.e. the first column of data) — confirm with the data layout.
meanintensitygraph <- function(data, low, high)
{
dataF <- data[data$TVC <= low,]
dataS <- data[data$TVC >= high, ]
# Fresh group: mean intensity per wavelength across fresh samples
dataF <- as.data.frame(t(dataF))
dataF <- dataF[-1,]
dataFsums <- data.frame(matrix(ncol=2,nrow=ncol(data)-1))
dataFsums[,1] <- rowSums(dataF) / ncol(dataF)
rownames(dataFsums) <- rownames(dataF)
dataFsums <- transform(dataFsums, wave=stringr::str_extract(rownames(dataF),"([0-9]*)(?=\\.)"))
dataFsums <- dataFsums[,-2]
dataFsums$wave <- as.numeric(as.character(dataFsums$wave))
# Spoiled group: same aggregation
dataS <- as.data.frame(t(dataS))
dataS <- dataS[-1,]
dataSsums <- data.frame(matrix(ncol=2,nrow=ncol(data)-1))
dataSsums[,1] <- rowSums(dataS) / ncol(dataS)
rownames(dataSsums) <- rownames(dataS)
dataSsums <- transform(dataSsums, wave=stringr::str_extract(rownames(dataS),"([0-9]*)(?=\\.)"))
dataSsums <- dataSsums[,-2]
dataSsums$wave <- as.numeric(as.character(dataSsums$wave))
# Combine both groups plus their absolute difference into one frame
dataSub <- data.frame(matrix(ncol=4,nrow=ncol(data)-1))
dataSub[,1] <- dataFsums$X1
dataSub[,2] <- dataSsums$X1
dataSub[,3] <- dataFsums$wave
dataSub[,4] <- abs(dataSub[,2] - dataSub[,1])
dataSub <- dataSub[-nrow(dataSub), ]
colnames(dataSub) <- c("FreshSums", "SpoiledSums", "Wave","Difference")
plot(dataSub[,3], dataSub[,1], type="l", xlab="Wave", ylab="Intensity", col="green")
legend(x=1000, y=1.5, c("Fresh","Spoiled"),cex=1,col=c("green","red"),pch=c(15,15), bty = "n")
lines(dataSub[,3], dataSub[,2], col="red")
plot(dataSub[,3], abs(dataSub[,2] - dataSub[,1]), type="l", xlab="wave",ylab = "Abs intensity difference")
return(dataSub)
}
# Applies caret pre-process methods in methodlist to the data
# If pca pre-process is selected, pcaComp = components to keep
# The TVC response is held aside so preProcess only sees the predictors,
# then re-attached to the transformed frame.
applypreprocess <- function(data, methodlist, pcaComp=0)
{
  response <- data$TVC
  predictors <- data[, -which(colnames(data) %in% "TVC")]
  pp <- if (pcaComp > 0) {
    preProcess(predictors, method = methodlist, pcaComp = pcaComp)
  } else {
    preProcess(predictors, method = methodlist)
  }
  predictors <- predict(pp, predictors)
  predictors$TVC <- response
  return(predictors)
}
# Creates a hyperspec object, type is either "FTIR" or else ("MSI")
# The TVC response column is stripped first; only spectra are wrapped.
createspc <- function(data, type="FTIR") # Removes TVC
{
  spectra <- data[,-which(colnames(data) %in% "TVC")]
  if(type == "FTIR") {
    # FTIR column names embed fractional wavenumbers; parse them out
    wavelengths <- as.numeric(stringr::str_extract(colnames(spectra),"[0-9]+\\.+[0-9]*"))
  } else {
    # MSI column names are already plain numbers
    wavelengths <- as.numeric(colnames(spectra))
  }
  return(new("hyperSpec", spc=spectra, wavelength=wavelengths))
}
# Applies spectral pre-process from the hyperspec package
# sgwindow = Savitzky-Golay filtering window, deriv = derivation factor, bin = binning factor
# type = "FTIR" or "VM", normalarea = normalise under area or a point
# The TVC response is held aside and re-attached after each transformation.
applyspectrapreprocess <- function(data, preproc, sgwindow=0, deriv=1, bin=4, type="FTIR", normalarea=F)
{
  TVC <- data$TVC
  if(preproc == "baseline") {
    # Polynomial baseline fitted below the spectrum, then subtracted
    spc <- createspc(data,type)
    bl <- spc.fit.poly.below(spc)
    spc <- spc - bl
    data <- as.data.frame(spc$spc)
    data$TVC <- TVC
  } else if(preproc == "snv") {
    data <- data[,-which(colnames(data) %in% "TVC")]
    data <- as.data.frame(standardNormalVariate(data))
    data$TVC <- TVC
  } else if(preproc == "snv-d") {
    # BUG FIX: the original referenced `spc` without ever creating it in
    # this branch (runtime error) and never re-attached TVC.
    spc <- createspc(data,type)
    data <- as.data.frame(detrend(X=spc$spc, wav=attributes(spc)$wavelength))
    data$TVC <- TVC
  } else if(preproc == "s-g"){
    data <- data[,-which(colnames(data) %in% "TVC")]
    data <- as.data.frame(savitzkyGolay(data, m=2,p=3,w=sgwindow))
    data$TVC <- TVC
  } else if(preproc == "deriv"){
    spc <- createspc(data,"VM")
    data <- as.data.frame(t(diff(t(spc$spc), differences=deriv)))
    data$TVC <- TVC
  } else if(preproc == "normal"){
    spc <- createspc(data,type)
    if(normalarea){
      # Normalise each spectrum by its mean over the 1500-1700 region
      spc <- spc/ rowMeans(spc[, ,1500~1700])
    } else if(type=="FTIR" & !normalarea){
      factors <- 1/apply(spc[, ,1500~1700],1,mean)
      spc <- sweep(spc,1,factors,"*")
    } else{
      spc <- sweep(spc,1,mean,"/")
    }
    data <- as.data.frame(spc$spc)
    data$TVC <- TVC
  } else if(preproc == "bin"){
    spc <- createspc(data,type)
    spc <- spc.bin(spc,bin)
    data <- as.data.frame(spc$spc)
    data$TVC <- TVC
  }
  return(data)
}
# Predicts and calculates performances of multiple models on a test data
# models = caret model list, test = test data
# plot = T would plot each model's predicted vs actual with custompredict()
# Returns a data frame (rows = model methods) with rounded RMSE, MAE, R2,
# within-1-log-cfu Accuracy, and the model method name.
multiplemodelpredict <- function(models, test, plot=F)
{
rmseResults <- data.frame(matrix(nrow=length(models),ncol=5))
colnames(rmseResults) <- c("RMSE", "MAE","R2", "Accuracy","name")
for(i in 1:length(models))
{
model <- models[[i]]
# RMSE
prediction <- predict(model, test)
rmse <- Metrics::rmse(test$TVC, prediction)
# R2
r2 <- MLmetrics::R2_Score(test$TVC,prediction)
# MAE
mae <- Metrics::mae(test$TVC,prediction) # Calculate mae for the current model and iter
# Within 1 cfu accuracy
difference <- as.data.frame(abs(test$TVC-prediction))
accuracy <- sum(difference <= 1) / nrow(difference)
if(plot == T)
{
custompredict(model, test)
}
rmseResults[i,] <- list(rmse, mae, r2, accuracy, model$method)
}
# Name column is held aside while the numeric metrics are rounded
rownames(rmseResults) <- rmseResults[,5]
modelmethods <- rmseResults$name
rmseResults <- round(rmseResults[,1:4],4)
rmseResults$name <- modelmethods
return(rmseResults)
}
# A.k.a monte-carlo CV, does multiple iterations with multiple models, multiple splits and calculate average performance metrics
# data = full data set (TVC + predictors), iters = number of random
# train/test splits, parallelcores = PSOCK workers for caret.
# Returns list(rmseResults, maeResults, r2Results, accResults): per-model
# (rows) by per-iteration (columns) data frames, rounded to 4 decimals.
multipleiterations <- function(data, iters, parallelcores = 3)
{
x <- iters
methodlist <- c("pls", "svmLinear", "svmRadial","rf","knn", "pcr","lars", "ridge", "nnet")
# One placeholder column each; iteration columns are cbind-ed on and the
# placeholder is dropped after the loop.
rmseResults <- data.frame(matrix(nrow=length(methodlist), ncol=1)) # DF to store RMSE results
rownames(rmseResults) <- methodlist
maeResults <- data.frame(matrix(nrow=length(methodlist), ncol=1)) # DF to store MAE results
rownames(maeResults) <- methodlist
r2Results <- data.frame(matrix(nrow=length(methodlist), ncol=1)) # DF to store R2 results
rownames(r2Results) <- methodlist
accResults <- data.frame(matrix(NA, nrow=length(methodlist), ncol=1)) # DF to save acc results
rownames(accResults) <- methodlist
# NOTE(review): max/min appear unused in this function
max <- 0
min <- 50
cl <- makePSOCKcluster(parallelcores) # Parallel processing workers
registerDoParallel(cl)
for(i in 1:x)
{
print(paste("Starting iteration", i ,"..."))
starttime <- Sys.time()
set.seed(as.integer((as.double(Sys.time())*1000+Sys.getpid()) %% 2^31))
# Split to train and test (option 0 = naive caret partition on TVC)
return <- split(data, 0)
train <- return$train
test <- return$test
gridlist <- gridList()
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 3,
savePredictions = "final"
,allowParallel = T)
# Train every model method on the same train split with its own grid
models <- caretEnsemble::caretList(TVC~., data=train, trControl = fitControl, metric = "RMSE", continue_on_fail = T
,tuneList = list(
pls=caretModelSpec(method="pls", tuneGrid=gridlist$plsgrid)
,svmLinear=caretModelSpec(method="svmLinear", tuneGrid=gridlist$svmLineargrid)
,svmRadial=caretModelSpec(method="svmRadial", tuneGrid=gridlist$svmRadialgrid)
,rf=caretModelSpec(method="rf", tuneGrid=gridlist$rfgrid, ntree=500)
,knn=caretModelSpec(method="knn", tuneGrid=gridlist$knngrid)
,pcr=caretModelSpec(method="pcr", tuneGrid=gridlist$plsgrid)
,lars=caretModelSpec(method="lars", tuneGrid=gridlist$larsgrid)
,ridge=caretModelSpec(method="ridge", tuneGrid=gridlist$ridgegrid)
,nnet=caretModelSpec(method="nnet", tuneGrid=gridlist$nnetgrid, linout = TRUE, maxit=1000)
)
)
# Predictions for test set (one column per model)
predictionsDF <- as.data.frame(predict(models, newdata = test))
# Calculate prediction difference
predDiff <- abs(predictionsDF - test$TVC)
# Calculate rmse for each model
rmseVector <- c()
maeVector <- c()
r2Vector <- c()
for(k in 1:length(models))
{
# RMSE
rmse <- Metrics::rmse(test$TVC,predictionsDF[,k]) # Calculate rmse for the current model and iter
rmseVector <- c(rmseVector, rmse) # Add to the rmselist for all rmse on this iter
# MAE
mae <- Metrics::mae(test$TVC,predictionsDF[,k]) # Calculate mae for the current model and iter
maeVector <- c(maeVector, mae) # Add to the rmselist for all mae on this iter
# R-squared
r2 <- MLmetrics::R2_Score(test$TVC,predictionsDF[,k]) # Calculate rsquared for the current model and iter
r2Vector <- c(r2Vector, r2) # Add to the rmselist for all rsquared on this iter
# ACCURACY
# Check how many predictions are within 1 cfu/log
# table() has no TRUE entry when every prediction is within 1 log cfu
table <- table(predDiff[,k] < 1)
if(is.na(table[2])){
pred <- 1
} else {
pred <- as.numeric(table[2]) / as.numeric(table[1] + table[2])
}
accResults[k,i] <- pred # Store current model+iter accuracy on the DF
}
# Add RMSEs together for this iteration
rmseResults <- cbind(rmseResults,rmseVector)
# Add MAEs together for this iteration
maeResults <- cbind(maeResults,maeVector)
# Add R-squareds together for this iteration
r2Results <- cbind(r2Results,r2Vector)
print(paste("This iteration took:", round(Sys.time() - starttime,3)))
}
stopCluster(cl) # Stop parallel processing
registerDoSEQ()
# Drop the placeholder first column and label columns by iteration number
rmseResults <- rmseResults[,-1, drop=F]
maeResults <- maeResults[,-1, drop=F]
r2Results <- r2Results[,-1, drop=F]
colnames(rmseResults) <- 1:x
colnames(maeResults) <- 1:x
colnames(r2Results) <- 1:x
colnames(accResults) <- 1:x
rmseResults <- round(rmseResults,4)
maeResults <- round(maeResults,4)
r2Results <- round(r2Results,4)
accResults <- round(accResults,4)
returnlist <- list(rmseResults=rmseResults, maeResults=maeResults,
r2Results=r2Results, accResults=accResults)
return(returnlist)
}
# Combines performance metrics (received from multipleiterations()) into one data frame
# Each input is a per-model (rows) by per-iteration (columns) data frame.
# Returns one frame per model with the 4-decimal mean of each metric plus
# a `name` column repeating the row names.
getfinalresults <- function(rmseResults, maeResults, r2Results, accResults)
{
  # Average each metric across all iteration columns (row = model)
  row_mean <- function(df) apply(df, 1, FUN=mean)
  rmseResults$mean <- row_mean(rmseResults)
  maeResults$mean <- row_mean(maeResults)
  r2Results$mean <- row_mean(r2Results)
  accResults$mean <- row_mean(accResults)
  # Assemble the combined summary frame directly from the mean columns
  finalresults <- data.frame(
    RMSE = round(rmseResults$mean, 4),
    MAE = round(maeResults$mean, 4),
    R2 = round(r2Results$mean, 4),
    Accuracy = round(accResults$mean, 4),
    row.names = rownames(accResults)
  )
  finalresults$name <- rownames(finalresults)
  return(finalresults)
}
# Does multiple iterations with a model on a data frame and averages variable importance over iterations
# Only pls and nnet supported
# Returns list(iterresult = per-iteration importance columns with a `mean`
# column, meanimp = mean importance with parsed integer wavenumbers, sorted
# descending by mean importance).
# NOTE(review): the data is split into train/test each iteration but the
# model is trained on the full `data` (data=data below) — confirm intent.
# NOTE(review): `return` is used as a plain variable name here (legal, but
# easy to misread).
variableimportancetest <- function(data, iters, model, parallelcores = 3)
{
cl <- makePSOCKcluster(parallelcores) # Set parallel processing
registerDoParallel(cl)
# Placeholder column; each iteration's importance is cbind-ed on
result <- data.frame(matrix(ncol=1,nrow=ncol(data)-1))
for(i in 1:iters)
{
print(paste("Iteration",i))
starttime <- Sys.time()
# Split to train and test
return <- split(data, 0)
train <- return$train
test <- return$test
set.seed(as.integer((as.double(Sys.time())*1000+Sys.getpid()) %% 2^31))
# Set grids
nnetgrid <- expand.grid(size = c(1,2), decay = seq(0.1, 1, 0.1))
plsgrid <- expand.grid(ncomp=seq(1,40,1))
# Set CV method
fitControl <- trainControl(method = "repeatedcv", number = 5, repeats=3
,savePredictions = "final"
,allowParallel = T)
# Run the model
if(model == "pls"){
models <- caretEnsemble::caretList(TVC~., data=data, trControl = fitControl, metric = "RMSE"
,tuneList = list(
pls=caretModelSpec(method="pls", tuneGrid=plsgrid)
)
)
} else if(model == "nnet"){
models <- caretEnsemble::caretList(TVC~., data=data, trControl = fitControl, metric = "RMSE"
,tuneList = list(
nnet=caretModelSpec(method="nnet", tuneGrid=nnetgrid,
linout = TRUE, maxit=1000, MaxNWts=7000)
)
)
}
# Get variable importance
imp <- as.data.frame(varImp(models[[1]])$importance)
result <- cbind(result,imp)
print(paste("Iteration took", Sys.time()-starttime))
}
stopCluster(cl)
registerDoSEQ()
result <- result[,-1]
# Put mean as extra col
result$mean <- apply(result,1,FUN=mean)
# Extract mean importance corresponding to integer waves
impmean <- result[,"mean",drop=F]
impmean <- transform(impmean, wave=as.numeric(stringr::str_extract(rownames(impmean),"[0-9]+\\.+[0-9]*")))
impmean <- impmean[order(-impmean$mean), ]
impmean$wave <- round(impmean$wave,0)
return(list(iterresult = result, meanimp = impmean))
}
# Plots a heatmap with multiple results from getfinalresults()
# resultlist = list(finalresults1, finalresult2,..), names = labels for each result (B1, B2..)
# excludeModel = exclude these models by name ("knn", "rf"..)
# includeModel = include these models by name ("knn", "rf"..)
# limits = accuracy colour-scale limits; values outside are squished
finalheatmap <- function(resultlist, names, excludeModel = c(), includeModel = c(), limits=c(50,90))
{
  # Pull one performance column (perfname) out of every result, tagging each
  # row with its model name (row names) and its batch label from `names`.
  getperformance <- function(resultlist, names, perfname)
  {
    for(i in seq_along(resultlist))
    {
      resultlist[[i]] <- resultlist[[i]][, perfname, drop=FALSE]
      resultlist[[i]]$model <- rownames(resultlist[[i]])
      resultlist[[i]]$batch <- rep(names[i], nrow(resultlist[[i]]))
    }
    return(resultlist)
  }
  rmselist <- getperformance(resultlist, names, "RMSE")
  maelist <- getperformance(resultlist, names, "MAE")
  rsquaredlist <- getperformance(resultlist, names, "R2")
  accuracylist <- getperformance(resultlist, names, "Accuracy")
  forheatmapRMSE <- ldply(rmselist, data.frame)
  forheatmapMAE <- ldply(maelist, data.frame)
  forheatmapR2 <- ldply(rsquaredlist, data.frame)
  forheatmapAcc <- ldply(accuracylist, data.frame)
  forheatmapAcc$Accuracy <- forheatmapAcc$Accuracy * 100
  # Drop excluded models. One vectorised filter replaces the original
  # per-model loop, which re-applied the identical filter on every pass.
  if(length(excludeModel) > 0)
  {
    forheatmapRMSE <- forheatmapRMSE[!forheatmapRMSE$model %in% excludeModel, ]
    forheatmapMAE <- forheatmapMAE[!forheatmapMAE$model %in% excludeModel, ]
    forheatmapR2 <- forheatmapR2[!forheatmapR2$model %in% excludeModel, ]
    forheatmapAcc <- forheatmapAcc[!forheatmapAcc$model %in% excludeModel, ]
  }
  # Keep only the requested models. BUG FIX: the original filtered R2 and Acc
  # by excludeModel here and never filtered MAE at all, so the four metric
  # frames could fall out of sync; all four are now filtered by includeModel.
  if(length(includeModel) > 0)
  {
    forheatmapRMSE <- forheatmapRMSE[forheatmapRMSE$model %in% includeModel, ]
    forheatmapMAE <- forheatmapMAE[forheatmapMAE$model %in% includeModel, ]
    forheatmapR2 <- forheatmapR2[forheatmapR2$model %in% includeModel, ]
    forheatmapAcc <- forheatmapAcc[forheatmapAcc$model %in% includeModel, ]
  }
  forheatmapAcc <- forheatmapAcc[order(-forheatmapAcc$Accuracy), ]
  # Tile colour encodes accuracy; the other three metrics are overlaid as text.
  plot <- ggplot(forheatmapAcc, aes(batch, reorder(model,Accuracy))) +
    geom_tile(aes(fill=Accuracy)) +
    scale_fill_gradientn(colors=colorRampPalette(c("tomato2","green"))(3), limits=limits, oob=squish) +
    theme_bw(base_size = 20) +
    labs(x="", y="") +
    scale_x_discrete(expand=c(0,0)) +
    scale_y_discrete(expand=c(0,0)) +
    geom_text(data=forheatmapAcc,aes(batch,model, label=paste("Acc:",Accuracy,"%")),
              vjust=-1, hjust=1, size=4.5, family="DMOFCB+AdvGulliv-R") +
    geom_text(data=forheatmapRMSE,aes(batch,model,label=paste("RMSE:",RMSE)),
              vjust=2, hjust=1, size=4.5, family="DMOFCB+AdvGulliv-R") +
    geom_text(data=forheatmapMAE,aes(batch,model,label=paste("MAE:",MAE)),
              vjust=-1, hjust=-0.2, size=4.5, family="DMOFCB+AdvGulliv-R") +
    geom_text(data=forheatmapR2,aes(batch,model,label=paste("R2:",R2)),
              vjust=2, hjust=-0.2, size=4.5, family="DMOFCB+AdvGulliv-R") +
    theme(legend.position = "none", text=element_text(family="DMOFCB+AdvGulliv-R"))
  print(plot)
  return(plot)
}
#----(NOT USED IN PAPER)----
# Plots the importance of variables over wavelengths
# Extracts caret variable importance from a fitted model, parses the
# wavelength out of each predictor name, and scatters importance vs wave.
plotvariableimportance <- function(model)
{
  importance_df <- as.data.frame(varImp(model)$importance)
  importance_df$wave <- as.numeric(stringr::str_extract(rownames(importance_df), "[0-9]+\\.+[0-9]*"))
  importance_df <- importance_df[order(importance_df$wave), ]
  ggplot(data=importance_df, aes(x=wave, y=Overall)) +
    geom_point()
}
# Filter waves according to RF variable importance, returns filtered data
# topx = Return FTIR data with topx waves. threshold = Return FTIR data with filtering values under threshold
# if topx and threshold is 0, returns the importance list
findandkeepwavesRF <- function(data, topx=0, threshold=0) # no topx argument returns the list
{
  # Register a 3-worker foreach back end.
  # NOTE(review): tuneRF/randomForest below do not obviously consume the
  # registered doParallel back end -- confirm the cluster is actually used.
  cl <- makePSOCKcluster(3)
  registerDoParallel(cl)
  # Find optimal mtry value (RF parameter)
  tune <- tuneRF(data[,-1], data[,1], trace = FALSE, plot = FALSE)
  # Pick the mtry row with the lowest OOB error in tuneRF's result matrix.
  mtry <- as.numeric(row.names(tune)[(which(tune[,2]==min(tune[,2])))])
  # Train model
  model <- randomForest(TVC~., data=data, mtry = mtry, ntree=1000, importance=T)
  # importanceSD holds the standard errors of the permutation importance.
  importance <- as.data.frame(model$importanceSD)
  # Recover the wavelength number from each predictor's row name
  # (the digits preceding the first dot).
  importance <- transform(importance, wave=stringr::str_extract(rownames(importance),"([0-9]*)(?=\\.)"))
  importance$wave <- as.numeric(as.character(importance$wave))
  # Tear the parallel back end down before returning.
  stopCluster(cl)
  registerDoSEQ()
  if(topx != 0) {
    # Keep the topx waves with the largest importance SD.
    # NOTE(review): top_n() is given importance$model.importanceSD rather than
    # a bare column name; this assumes that exact column name -- verify.
    keeplist <- top_n(importance, topx, importance$model.importanceSD)$wave
  } else if(threshold !=0){
    # Keep every wave whose importance SD exceeds the threshold.
    keeplist <- importance[importance$model.importanceSD > threshold,]$wave
  } else {
    # Neither filter requested: return the raw importance table instead.
    return(importance)
  }
  data <- keepwave(data, keeplist)
  return(data)
}
# Plots std of wavelengths
# Plot the per-wavelength standard deviation of intensity and return a
# data frame with one row per wavelength (columns SD and waves).
plotSTD <- function(data)
{
  # Transpose so each row is one wavelength, then drop the TVC response row.
  waves_df <- as.data.frame(t(data))
  waves_df <- waves_df[-which(rownames(waves_df) %in% "TVC"),]
  # Row-wise standard deviation across samples.
  waves_df$SD <- apply(waves_df, 1, sd)
  # Recover the numeric wavelength from each row name.
  waves_df$waves <- as.numeric(stringr::str_extract(rownames(waves_df), "[0-9]+\\.+[0-9]*"))
  p <- ggplot(data = waves_df, aes(x = waves, y = SD)) +
    geom_line() +
    xlab("Wave length") +
    ylab("Std") +
    ggtitle("Standard deviation of intensity over wave lengths") +
    scale_x_continuous("Wave", breaks = seq(500, 4000, 500)) +
    theme_classic()
  print(p)
  waves_df[, c("SD", "waves")]
}
# Filters the data according to the std values
# std = std data frame, threshold = lowest std allowed
# Keeps only the wavelength columns of `data` whose standard deviation in
# `std` (row names = column names of `data`) exceeds `threshold`; the
# response column "TVC" is always retained.
filterwavesfromSTD <- function(data, std, threshold)
{
  high_sd <- std[which(std$SD > threshold), ]
  wanted <- c(rownames(high_sd), 'TVC')
  data[, colnames(data) %in% wanted]
}
# Calculates and plots the linear model coefficients of a data
# Fits TVC ~ all wavelengths, plots each coefficient against its
# wavelength, and returns the coefficient table (intercept and NA
# coefficients removed).
lmcoefficient <- function(data)
{
  model <- lm(TVC~., data=data)
  # as.data.frame() of the named vector deparses the expression into the
  # column name "model.coefficients", which the plot and callers rely on.
  coef <- as.data.frame(model$coefficients)
  # Wavelength = digits before the first dot in each term's row name.
  coef$wave <- as.numeric(stringr::str_extract(rownames(coef), "([0-9]*)(?=\\.)"))
  # Drop the intercept row, then any terms with NA coefficients.
  coef <- na.omit(coef[-1, ])
  p <- ggplot(data = coef, aes(x = wave, y = model.coefficients)) +
    geom_line()
  print(p)
  coef
}
# Filters waves in FTIR by contribution to PCA
# contribution = contribution data frame calculated elsewhere, threshold = values under threshold will be filtered
filterwavesbyPCAcontrib <- function(data, contribution, threshold)
{
  # Row names of contributions above the threshold identify the waves to keep.
  filterlist <- rownames(contribution[contribution$sum > threshold,])
  # Qualified stringr:: call for consistency with the rest of the file
  # (the bare str_extract silently required stringr to be attached).
  filterlist <- stringr::str_extract(filterlist, "[0-9]*(?=\\.)") # Convert row names to wave numbers
  data <- keepwave(data, filterlist) # Filter the data
  return(data)
}
# Tries to find patterns of train/test split from a list of RMSE results for multiple models
#
# RMSEbestlist: list of fitted models, each carrying a $trainingData data
#               frame whose row names are sample names.
# data:         data frame whose row names enumerate ALL sample names.
#               Defaults to the global `data` object for backward
#               compatibility (the original implicitly read that global).
# Returns a data frame (one row per sample) with columns:
#   Count -- number of training sets the sample appeared in
#   pct   -- Count / number of models, rounded to 2 decimals
findpatternbestmodels <- function(RMSEbestlist, data = NULL)
{
  # The original relied on lexical scoping to find `data` in the global
  # environment; keep that behavior when no data frame is supplied.
  if (is.null(data)) {
    data <- get("data", envir = globalenv())
  }
  # All training-set row names across every model. (Fixes the original
  # `3:length(...)` loop, which misbehaved for lists with fewer than 3 models.)
  train_names <- unlist(lapply(RMSEbestlist, function(m) rownames(m$trainingData)))
  samples <- rownames(data)
  # Vectorized count per sample instead of growing a data frame with rbind().
  counts <- vapply(samples, function(nm) sum(train_names == nm), integer(1))
  countbest <- data.frame(Count = unname(counts), row.names = samples)
  # What percent of all iterations used this sample for training.
  countbest$pct <- round(countbest$Count / length(RMSEbestlist), 2)
  return(countbest)
}
|
# Working directory holding the raw exports and the generated outN.csv files.
# NOTE(review): hard-coded user path -- this script only runs on this machine.
setwd("C:/Users/nitin/OneDrive/WIP/Auto")
# Reference date used to compute the age of each service request.
from_date<- as.Date(c("2015-06-04"))
##################################################QWERTY
# ---- Open SRs: split by owner group into labelled sections of out2.csv ----
INfile<-read.csv("Open SR's.csv")
# Sort by owner group so each group's rows are contiguous.
z<-INfile[order(INfile$Owner.Group),]
#CoreAc<-c(z$Owner.Group=="C-BOI-IE-AMS-CARDS",z$Owner.Group=="C-BOI-IE-AMS-BOOKKEEPING")
Name<-"BOOK KEEPING"
Bkk<- z[ which(z$Owner.Group=="C-BOI-IE-AMS-BOOKKEEPING"), ]
# Section label first (creates/overwrites out2.csv), then the group's rows.
write.table(Name, file = "out2.csv",row.names=FALSE, na="",col.names=FALSE, sep=",")
write.table(Bkk,file="out2.csv",append=TRUE,sep=",",row.names=FALSE)
# Age (in days) of each bookkeeping SR relative to from_date.
# Reported.Date assumed to be dd/mm/yyyy -- TODO confirm the export format.
new_date<-as.POSIXlt(strptime(Bkk$Reported.Date, format="%d/%m/%Y"))
new_date<-as.Date(new_date)
age<-from_date-new_date
# Bucket ages into 7/15/30/60/120-day bins; 999 marks "older than 120 days".
age_bucket<-age
for(i in seq_along(age)) {
  if(age[i]<8) {
    age_bucket[i]<- 7
  }
  else if(age[i]<16){
    age_bucket[i]<-15
  }
  else if(age[i]<31){
    age_bucket[i]<-30
  }
  else if(age[i]<61){
    age_bucket[i]<-60
  }
  else if(age[i]<121){
    age_bucket[i]<-120
  }
  else {age_bucket[i]<-999
  }
}
Bkk$agebucket<-age_bucket
# Frequency table of age buckets for the open bookkeeping SRs.
Bkk_SR_Open<-as.data.frame(table(age_bucket))
# Empty template of age-bucket columns.
# NOTE(review): nodata and Bkk_SR_Open are not used later in this script -- confirm.
nodata <- data.frame(Age_0_to_7= numeric(0), Age_8_to_15= numeric(0), Age_16_to_30 = numeric(0), Age_31_to_60 = numeric(0), Col_61_to_120 = numeric(0))
# Append the remaining owner groups to out2.csv, each preceded by a
# one-line section label. (Deduplicates five copy-pasted blocks; the
# transient Name/Cards/CIS/ITEM/MIS/PRINT globals were only used here.)
groups <- c("CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # Label row, then that group's rows (with header), all appended.
  write.table(names(groups)[g], file = "out2.csv", append = TRUE,
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out2.csv", append = TRUE, sep = ",", row.names = FALSE)
}
############PR's######################################
# Build out3.csv from "Open PR's.csv": one labelled section per AMS owner
# group. (Deduplicates six copy-pasted blocks.)
INfile <- read.csv("Open PR's.csv")
z <- INfile[order(INfile$Owner.Group), ]
groups <- c("BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
            "CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # First label creates/overwrites the file; everything after appends.
  write.table(names(groups)[g], file = "out3.csv", append = (g > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out3.csv", append = TRUE, sep = ",", row.names = FALSE)
}
######################################################################################################
# PART 2 - NEW IN SR PR SR
######################################################################################################
# Build out4.csv from "New IN's.csv": one labelled section per AMS owner group.
INfile <- read.csv("New IN's.csv")
z <- INfile[order(INfile$Owner.Group), ]
groups <- c("BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
            "CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # First label creates/overwrites the file; everything after appends.
  write.table(names(groups)[g], file = "out4.csv", append = (g > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out4.csv", append = TRUE, sep = ",", row.names = FALSE)
}
##################################################
# Build out5.csv from "New SR's.csv": one labelled section per AMS owner group.
INfile <- read.csv("New SR's.csv")
z <- INfile[order(INfile$Owner.Group), ]
groups <- c("BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
            "CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # First label creates/overwrites the file; everything after appends.
  write.table(names(groups)[g], file = "out5.csv", append = (g > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out5.csv", append = TRUE, sep = ",", row.names = FALSE)
}
############PR's######################################
# Build out6.csv from "New PR's.csv": one labelled section per AMS owner group.
INfile <- read.csv("New PR's.csv")
z <- INfile[order(INfile$Owner.Group), ]
groups <- c("BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
            "CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # First label creates/overwrites the file; everything after appends.
  write.table(names(groups)[g], file = "out6.csv", append = (g > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out6.csv", append = TRUE, sep = ",", row.names = FALSE)
}
######################################################################################################
# PART 3 - CLOSE NEW IN SR PR SR
######################################################################################################
# Build out7.csv from "Closed IN's.csv": one labelled section per AMS owner group.
INfile <- read.csv("Closed IN's.csv")
z <- INfile[order(INfile$Owner.Group), ]
groups <- c("BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
            "CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # First label creates/overwrites the file; everything after appends.
  write.table(names(groups)[g], file = "out7.csv", append = (g > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out7.csv", append = TRUE, sep = ",", row.names = FALSE)
}
##################################################
# Build out8.csv from "Closed SR's.csv": one labelled section per AMS owner group.
INfile <- read.csv("Closed SR's.csv")
z <- INfile[order(INfile$Owner.Group), ]
groups <- c("BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
            "CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # First label creates/overwrites the file; everything after appends.
  write.table(names(groups)[g], file = "out8.csv", append = (g > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out8.csv", append = TRUE, sep = ",", row.names = FALSE)
}
############PR's######################################
# Build out9.csv from "Closed PR's.csv": one labelled section per AMS owner group.
INfile <- read.csv("Closed PR's.csv")
z <- INfile[order(INfile$Owner.Group), ]
groups <- c("BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
            "CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # First label creates/overwrites the file; everything after appends.
  write.table(names(groups)[g], file = "out9.csv", append = (g > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out9.csv", append = TRUE, sep = ",", row.names = FALSE)
}
#######################output#######################################
# Consolidate the nine intermediate CSVs into one sheet each of
# Consolidated.xlsx, then delete the intermediates. Ordering matches the
# original: read all files, then write all sheets, then remove all files.
library(xlsx)
auto_dir <- "C:/Users/nitin/OneDrive/WIP/Auto"
# NOTE(review): out.csv is read here but never written by this script --
# presumably produced by a companion script; confirm it exists beforehand.
sheets <- c(out  = "Open_IN",   out2 = "Open_SR",   out3 = "Open_PR",
            out4 = "New_IN",    out5 = "New_SR",    out6 = "New_PR",
            out7 = "Closed_IN", out8 = "Closed_SR", out9 = "Closed_PR")
csv_paths <- file.path(auto_dir, paste0(names(sheets), ".csv"))
tables <- lapply(csv_paths, read.csv, header = FALSE, na.strings = "")
for (i in seq_along(tables)) {
  # First sheet creates the workbook; the remaining sheets append to it.
  write.xlsx(tables[[i]], file = "Consolidated.xlsx", sheetName = sheets[[i]],
             append = (i > 1), row.names = FALSE, col.names = FALSE)
}
##############Delete intermediate files###########
file.remove(csv_paths)
|
/Auto2.R
|
no_license
|
NitinMahajan1/Automate-for-Dell
|
R
| false
| false
| 16,906
|
r
|
# Working directory holding the raw exports and the generated outN.csv files.
# NOTE(review): hard-coded user path -- this script only runs on this machine.
setwd("C:/Users/nitin/OneDrive/WIP/Auto")
# Reference date used to compute the age of each service request.
from_date<- as.Date(c("2015-06-04"))
##################################################QWERTY
# ---- Open SRs: split by owner group into labelled sections of out2.csv ----
INfile<-read.csv("Open SR's.csv")
# Sort by owner group so each group's rows are contiguous.
z<-INfile[order(INfile$Owner.Group),]
#CoreAc<-c(z$Owner.Group=="C-BOI-IE-AMS-CARDS",z$Owner.Group=="C-BOI-IE-AMS-BOOKKEEPING")
Name<-"BOOK KEEPING"
Bkk<- z[ which(z$Owner.Group=="C-BOI-IE-AMS-BOOKKEEPING"), ]
# Section label first (creates/overwrites out2.csv), then the group's rows.
write.table(Name, file = "out2.csv",row.names=FALSE, na="",col.names=FALSE, sep=",")
write.table(Bkk,file="out2.csv",append=TRUE,sep=",",row.names=FALSE)
# Age (in days) of each bookkeeping SR relative to from_date.
# Reported.Date assumed to be dd/mm/yyyy -- TODO confirm the export format.
new_date<-as.POSIXlt(strptime(Bkk$Reported.Date, format="%d/%m/%Y"))
new_date<-as.Date(new_date)
age<-from_date-new_date
# Bucket ages into 7/15/30/60/120-day bins; 999 marks "older than 120 days".
age_bucket<-age
for(i in seq_along(age)) {
  if(age[i]<8) {
    age_bucket[i]<- 7
  }
  else if(age[i]<16){
    age_bucket[i]<-15
  }
  else if(age[i]<31){
    age_bucket[i]<-30
  }
  else if(age[i]<61){
    age_bucket[i]<-60
  }
  else if(age[i]<121){
    age_bucket[i]<-120
  }
  else {age_bucket[i]<-999
  }
}
Bkk$agebucket<-age_bucket
# Frequency table of age buckets for the open bookkeeping SRs.
Bkk_SR_Open<-as.data.frame(table(age_bucket))
# Empty template of age-bucket columns.
# NOTE(review): nodata and Bkk_SR_Open are not used later in this script -- confirm.
nodata <- data.frame(Age_0_to_7= numeric(0), Age_8_to_15= numeric(0), Age_16_to_30 = numeric(0), Age_31_to_60 = numeric(0), Col_61_to_120 = numeric(0))
# Append the remaining owner groups to out2.csv, each preceded by a
# one-line section label. (Deduplicates five copy-pasted blocks; the
# transient Name/Cards/CIS/ITEM/MIS/PRINT globals were only used here.)
groups <- c("CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # Label row, then that group's rows (with header), all appended.
  write.table(names(groups)[g], file = "out2.csv", append = TRUE,
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out2.csv", append = TRUE, sep = ",", row.names = FALSE)
}
############PR's######################################
# Build out3.csv from "Open PR's.csv": one labelled section per AMS owner group.
INfile <- read.csv("Open PR's.csv")
z <- INfile[order(INfile$Owner.Group), ]
groups <- c("BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
            "CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # First label creates/overwrites the file; everything after appends.
  write.table(names(groups)[g], file = "out3.csv", append = (g > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out3.csv", append = TRUE, sep = ",", row.names = FALSE)
}
######################################################################################################
# PART 2 - NEW IN SR PR SR
######################################################################################################
# Build out4.csv from "New IN's.csv": one labelled section per AMS owner group.
INfile <- read.csv("New IN's.csv")
z <- INfile[order(INfile$Owner.Group), ]
groups <- c("BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
            "CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # First label creates/overwrites the file; everything after appends.
  write.table(names(groups)[g], file = "out4.csv", append = (g > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out4.csv", append = TRUE, sep = ",", row.names = FALSE)
}
##################################################
# Build out5.csv from "New SR's.csv": one labelled section per AMS owner group.
INfile <- read.csv("New SR's.csv")
z <- INfile[order(INfile$Owner.Group), ]
groups <- c("BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
            "CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # First label creates/overwrites the file; everything after appends.
  write.table(names(groups)[g], file = "out5.csv", append = (g > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out5.csv", append = TRUE, sep = ",", row.names = FALSE)
}
############PR's######################################
# Build out6.csv from "New PR's.csv": one labelled section per AMS owner group.
INfile <- read.csv("New PR's.csv")
z <- INfile[order(INfile$Owner.Group), ]
groups <- c("BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
            "CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # First label creates/overwrites the file; everything after appends.
  write.table(names(groups)[g], file = "out6.csv", append = (g > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out6.csv", append = TRUE, sep = ",", row.names = FALSE)
}
######################################################################################################
# PART 3 - CLOSE NEW IN SR PR SR
######################################################################################################
# Build out7.csv from "Closed IN's.csv": one labelled section per AMS owner group.
INfile <- read.csv("Closed IN's.csv")
z <- INfile[order(INfile$Owner.Group), ]
groups <- c("BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
            "CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # First label creates/overwrites the file; everything after appends.
  write.table(names(groups)[g], file = "out7.csv", append = (g > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out7.csv", append = TRUE, sep = ",", row.names = FALSE)
}
##################################################
# Build out8.csv from "Closed SR's.csv": one labelled section per AMS owner group.
INfile <- read.csv("Closed SR's.csv")
z <- INfile[order(INfile$Owner.Group), ]
groups <- c("BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
            "CARDS"           = "C-BOI-IE-AMS-CARDS",
            "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
            "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
            "MIS"             = "C-BOI-IE-AMS-MIS",
            "PRINT"           = "C-BOI-IE-AMS-PRINT")
for (g in seq_along(groups)) {
  # First label creates/overwrites the file; everything after appends.
  write.table(names(groups)[g], file = "out8.csv", append = (g > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  write.table(z[which(z$Owner.Group == groups[[g]]), ],
              file = "out8.csv", append = TRUE, sep = ",", row.names = FALSE)
}
############PR's######################################
# Closed PRs: split the closed-problem extract by owner group and write each
# group to out9.csv as a titled section. Refactored from six copy-pasted
# read/subset/write stanzas into one data-driven loop.
INfile <- read.csv("Closed PR's.csv")
z <- INfile[order(INfile$Owner.Group), ]

# Section title -> Owner.Group code; vector order fixes the section order.
groups <- c(
  "BOOK KEEPING"    = "C-BOI-IE-AMS-BOOKKEEPING",
  "CARDS"           = "C-BOI-IE-AMS-CARDS",
  "CIS Lending"     = "C-BOI-IE-AMS-CISLENDING",
  "ITEM PROCESSING" = "C-BOI-IE-AMS-ITEMPROC",
  "MIS"             = "C-BOI-IE-AMS-MIS",
  "PRINT"           = "C-BOI-IE-AMS-PRINT"
)

for (i in seq_along(groups)) {
  section <- z[which(z$Owner.Group == groups[[i]]), ]
  # The first section title creates the file; later sections append to it.
  write.table(names(groups)[i], file = "out9.csv", append = (i > 1),
              row.names = FALSE, na = "", col.names = FALSE, sep = ",")
  # col.names stays at its default (TRUE), matching the original output,
  # which repeats the header row in front of every section.
  write.table(section, file = "out9.csv", append = TRUE, sep = ",",
              row.names = FALSE)
}
#######################output#######################################
# Consolidate the nine per-queue CSV extracts into one multi-sheet workbook,
# then delete each intermediate file. Refactored from nine read/write/remove
# triples into one loop over a file-stem -> sheet-name map.
library(xlsx)

base_dir <- "C:/Users/nitin/OneDrive/WIP/Auto"
# CSV file stem -> destination sheet name; vector order fixes the sheet order.
sheets <- c(
  out  = "Open_IN",   out2 = "Open_SR",   out3 = "Open_PR",
  out4 = "New_IN",    out5 = "New_SR",    out6 = "New_PR",
  out7 = "Closed_IN", out8 = "Closed_SR", out9 = "Closed_PR"
)

for (i in seq_along(sheets)) {
  csv_path <- file.path(base_dir, paste0(names(sheets)[i], ".csv"))
  dat <- read.csv(csv_path, header = FALSE, na.strings = "")
  # The first sheet creates the workbook; the rest are appended to it.
  write.xlsx(dat, file = "Consolidated.xlsx", sheetName = sheets[[i]],
             append = (i > 1), row.names = FALSE, col.names = FALSE)
  ##############Delete intermediate file###########
  file.remove(csv_path)
}
|
php端的输入验证,如果是POST过来的验证POST的变量,否则验证GET
$rules = array(
'name' => array(
'required',
'maxlength'=>5
),
'age' => array(
'required',
'int',
),
'email' => array(
'required',
'email'
)
);
$msg = array(
'name' => array(
'required'=>'请填写用户名',
'maxlength'=>'长度不能大于5'
),
'age' => array(
'required'=>'请填写年龄',
'int'=>'年龄只能是整数',
),
'email' => array(
'required'=>'请填写email',
'email'=>'邮箱格式错误'
)
);
var_dump(Validator::ValidateInput($_POST, $rules, $msg));
</code>
GetData 用来获取用户的输入,有时我们在程序中会这样写代码:
$userName=$_POST['userName'];
$password=$_POST['password'];
......
一堆这样的代码,看起来很不美观。可以像下面这样写:
$parm=array('name','age','email');
$data=Validator::GetData($parm);
extract($data);
当然了,你也可以直接extract($_POST);区别是可能POST中有你不需要的数据。
Validator::ValidateInput($data, $rules, $msg);
用来验证用户的输入是否正确,成功返回空'',失败返回错误信息,各字段的错误信息以<br/>分隔,用法与JQuery Validate类似,支持以下几种验证:
regex:正则表达式
ip:验证IP地址
numeric:数字,整数或小数
int:整数
url:网址
maxlength:最大长度限制
minlength:最小长度限制
max:最大值
min:最小值
email:邮箱
|
/README.rd
|
no_license
|
male110/php_validator
|
R
| false
| false
| 1,826
|
rd
|
php端的输入验证,如果是POST过来的验证POST的变量,否则验证GET
$rules = array(
'name' => array(
'required',
'maxlength'=>5
),
'age' => array(
'required',
'int',
),
'email' => array(
'required',
'email'
)
);
$msg = array(
'name' => array(
'required'=>'请填写用户名',
'maxlength'=>'长度不能大于5'
),
'age' => array(
'required'=>'请填写年龄',
'int'=>'年龄只能是整数',
),
'email' => array(
'required'=>'请填写email',
'email'=>'邮箱格式错误'
)
);
var_dump(Validator::ValidateInput($_POST, $rules, $msg));
</code>
GetData 用来获取用户的输入,有时我们在程序中会这样写代码:
$userName=$_POST['userName'];
$password=$_POST['password'];
......
一堆这样的代码,看起来很不美观。可以像下面这样写:
$parm=array('name','age','email');
$data=Validator::GetData($parm);
extract($data);
当然了,你也可以直接extract($_POST);区别是可能POST中有你不需要的数据。
Validator::ValidateInput($data, $rules, $msg);
用来验证用户的输入是否正确,成功返回空'',失败返回错误信息,各字段的错误信息以<br/>分隔,用法与JQuery Validate类似,支持以下几种验证:
regex:正则表达式
ip:验证IP地址
numeric:数字,整数或小数
int:整数
url:网址
maxlength:最大长度限制
minlength:最小长度限制
max:最大值
min:最小值
email:邮箱
|
# run_analysis.R: merge the UCI HAR train/test sets, keep the mean/std
# measurements, attach descriptive activity names, and write a tidy
# per-subject / per-activity summary to tidyData.txt.
# NOTE(review): the hard-coded setwd() ties the script to one machine —
# confirm the UCI HAR Dataset location before running.
setwd("C:/Users/goku/downloads/coursera/Data Science Track/Getting and Cleaning data/UCI HAR Dataset")

# 1. Merge the training and the test sets to create one data set.
features     <- read.table('./features.txt', header = FALSE)
activityType <- read.table('./activity_labels.txt', header = FALSE)
subjectTrain <- read.table('./train/subject_train.txt', header = FALSE)
xTrain       <- read.table('./train/x_train.txt', header = FALSE)
yTrain       <- read.table('./train/y_train.txt', header = FALSE)

# Assign column names to the imported training data.
colnames(activityType) <- c('activityId', 'activityType')
colnames(subjectTrain) <- 'subjectId'
colnames(xTrain)       <- features[, 2]
colnames(yTrain)       <- 'activityId'

# Final training set: activity id, subject id, then the feature columns.
trainingData <- cbind(yTrain, subjectTrain, xTrain)

# Read in the test data and label it the same way.
subjectTest <- read.table('./test/subject_test.txt', header = FALSE)
xTest       <- read.table('./test/x_test.txt', header = FALSE)
yTest       <- read.table('./test/y_test.txt', header = FALSE)

colnames(subjectTest) <- 'subjectId'
colnames(xTest)       <- features[, 2]
colnames(yTest)       <- 'activityId'

testData <- cbind(yTest, subjectTest, xTest)

# Combine training and test data into a single data set.
finalData <- rbind(trainingData, testData)
colNames  <- colnames(finalData)

# 2. Keep only the id columns and the mean()/std() measurements.
# NOTE(review): the dots and parentheses in these patterns are unescaped regex
# metacharacters, so the match is looser than the literal strings suggest;
# kept as-is to preserve the original column selection.
logicalVector <- (grepl("activity..", colNames) |
                  grepl("subject..", colNames) |
                  grepl("-mean..", colNames) & !grepl("-meanFreq..", colNames) &
                    !grepl("mean..-", colNames) |
                  grepl("-std..", colNames) & !grepl("-std()..-", colNames))
finalData <- finalData[logicalVector]

# 3. Use descriptive activity names (adds the activityType column).
finalData <- merge(finalData, activityType, by = 'activityId', all.x = TRUE)
colNames  <- colnames(finalData)

# 4. Appropriately label the data set with descriptive variable names.
for (i in seq_along(colNames)) {
  colNames[i] <- gsub("\\()", "", colNames[i])
  colNames[i] <- gsub("-std$", "StdDev", colNames[i])
  colNames[i] <- gsub("-mean", "Mean", colNames[i])
  colNames[i] <- gsub("^(t)", "time", colNames[i])
  colNames[i] <- gsub("^(f)", "freq", colNames[i])
  colNames[i] <- gsub("([Gg]ravity)", "Gravity", colNames[i])
  colNames[i] <- gsub("([Bb]ody[Bb]ody|[Bb]ody)", "Body", colNames[i])
  colNames[i] <- gsub("[Gg]yro", "Gyro", colNames[i])
  colNames[i] <- gsub("AccMag", "AccMagnitude", colNames[i])
  colNames[i] <- gsub("([Bb]odyaccjerkmag)", "BodyAccJerkMagnitude", colNames[i])
  colNames[i] <- gsub("JerkMag", "JerkMagnitude", colNames[i])
  colNames[i] <- gsub("GyroMag", "GyroMagnitude", colNames[i])
}
colnames(finalData) <- colNames

# 5. Tidy data set: mean of each variable per activity and per subject.
finalDataNoActivityType <- finalData[, names(finalData) != 'activityType']

# Fix: the original compared names() against a length-2 vector with `!=`,
# which recycles element-wise and only excludes the id columns by accident.
# `%in%` excludes exactly activityId and subjectId from the aggregation.
idCols   <- c('activityId', 'subjectId')
tidyData <- aggregate(
  finalDataNoActivityType[, !(names(finalDataNoActivityType) %in% idCols)],
  by = list(activityId = finalDataNoActivityType$activityId,
            subjectId  = finalDataNoActivityType$subjectId),
  mean
)

# Re-attach the descriptive activity names and export the tidy data set.
tidyData <- merge(tidyData, activityType, by = 'activityId', all.x = TRUE)
write.table(tidyData, './tidyData.txt', row.names = TRUE, sep = '\t')
|
/run_analysis.R
|
no_license
|
gokuprasanna/Getting-and-Cleaning-Data
|
R
| false
| false
| 4,597
|
r
|
# run_analysis.R: merge the UCI HAR train/test sets, keep the mean/std
# measurements, attach descriptive activity names, and write a tidy
# per-subject / per-activity summary to tidyData.txt.
# NOTE(review): the hard-coded setwd() ties the script to one machine —
# confirm the UCI HAR Dataset location before running.
setwd("C:/Users/goku/downloads/coursera/Data Science Track/Getting and Cleaning data/UCI HAR Dataset")

# 1. Merge the training and the test sets to create one data set.
features     <- read.table('./features.txt', header = FALSE)
activityType <- read.table('./activity_labels.txt', header = FALSE)
subjectTrain <- read.table('./train/subject_train.txt', header = FALSE)
xTrain       <- read.table('./train/x_train.txt', header = FALSE)
yTrain       <- read.table('./train/y_train.txt', header = FALSE)

# Assign column names to the imported training data.
colnames(activityType) <- c('activityId', 'activityType')
colnames(subjectTrain) <- 'subjectId'
colnames(xTrain)       <- features[, 2]
colnames(yTrain)       <- 'activityId'

# Final training set: activity id, subject id, then the feature columns.
trainingData <- cbind(yTrain, subjectTrain, xTrain)

# Read in the test data and label it the same way.
subjectTest <- read.table('./test/subject_test.txt', header = FALSE)
xTest       <- read.table('./test/x_test.txt', header = FALSE)
yTest       <- read.table('./test/y_test.txt', header = FALSE)

colnames(subjectTest) <- 'subjectId'
colnames(xTest)       <- features[, 2]
colnames(yTest)       <- 'activityId'

testData <- cbind(yTest, subjectTest, xTest)

# Combine training and test data into a single data set.
finalData <- rbind(trainingData, testData)
colNames  <- colnames(finalData)

# 2. Keep only the id columns and the mean()/std() measurements.
# NOTE(review): the dots and parentheses in these patterns are unescaped regex
# metacharacters, so the match is looser than the literal strings suggest;
# kept as-is to preserve the original column selection.
logicalVector <- (grepl("activity..", colNames) |
                  grepl("subject..", colNames) |
                  grepl("-mean..", colNames) & !grepl("-meanFreq..", colNames) &
                    !grepl("mean..-", colNames) |
                  grepl("-std..", colNames) & !grepl("-std()..-", colNames))
finalData <- finalData[logicalVector]

# 3. Use descriptive activity names (adds the activityType column).
finalData <- merge(finalData, activityType, by = 'activityId', all.x = TRUE)
colNames  <- colnames(finalData)

# 4. Appropriately label the data set with descriptive variable names.
for (i in seq_along(colNames)) {
  colNames[i] <- gsub("\\()", "", colNames[i])
  colNames[i] <- gsub("-std$", "StdDev", colNames[i])
  colNames[i] <- gsub("-mean", "Mean", colNames[i])
  colNames[i] <- gsub("^(t)", "time", colNames[i])
  colNames[i] <- gsub("^(f)", "freq", colNames[i])
  colNames[i] <- gsub("([Gg]ravity)", "Gravity", colNames[i])
  colNames[i] <- gsub("([Bb]ody[Bb]ody|[Bb]ody)", "Body", colNames[i])
  colNames[i] <- gsub("[Gg]yro", "Gyro", colNames[i])
  colNames[i] <- gsub("AccMag", "AccMagnitude", colNames[i])
  colNames[i] <- gsub("([Bb]odyaccjerkmag)", "BodyAccJerkMagnitude", colNames[i])
  colNames[i] <- gsub("JerkMag", "JerkMagnitude", colNames[i])
  colNames[i] <- gsub("GyroMag", "GyroMagnitude", colNames[i])
}
colnames(finalData) <- colNames

# 5. Tidy data set: mean of each variable per activity and per subject.
finalDataNoActivityType <- finalData[, names(finalData) != 'activityType']

# Fix: the original compared names() against a length-2 vector with `!=`,
# which recycles element-wise and only excludes the id columns by accident.
# `%in%` excludes exactly activityId and subjectId from the aggregation.
idCols   <- c('activityId', 'subjectId')
tidyData <- aggregate(
  finalDataNoActivityType[, !(names(finalDataNoActivityType) %in% idCols)],
  by = list(activityId = finalDataNoActivityType$activityId,
            subjectId  = finalDataNoActivityType$subjectId),
  mean
)

# Re-attach the descriptive activity names and export the tidy data set.
tidyData <- merge(tidyData, activityType, by = 'activityId', all.x = TRUE)
write.table(tidyData, './tidyData.txt', row.names = TRUE, sep = '\t')
|
library(genpathmox)
### Name: plot.xtree.pls
### Title: Plot function for the Pathmox Segmentation Trees: PLS-PM
### Aliases: plot.xtree.pls
### ** Examples
## Not run:
##D ## example of PLS-PM in alumni satisfaction
##D
##D # select manifest variables
##D data.fib <-fibtele[,12:35]
##D
##D # define inner model matrix
##D Image = rep(0,5)
##D Qual.spec = rep(0,5)
##D Qual.gen = rep(0,5)
##D Value = c(1,1,1,0,0)
##D Satis = c(1,1,1,1,0)
##D inner.fib = rbind(Image,Qual.spec, Qual.gen, Value, Satis)
##D colnames(inner.fib) = rownames(inner.fib)
##D
##D # blocks of indicators (outer model)
##D outer.fib = list(1:8,9:11,12:16,17:20,21:24)
##D modes.fib = rep("A", 5)
##D
##D # apply plspm
##D pls.fib = plspm(data.fib, inner.fib, outer.fib, modes.fib)
##D
##D # re-ordering those segmentation variables with ordinal scale
##D seg.fib= fibtele[,2:11]
##D
##D seg.fib$Age = factor(seg.fib$Age, ordered=T)
##D seg.fib$Salary = factor(seg.fib$Salary,
##D levels=c("<18k","25k","35k","45k",">45k"), ordered=T)
##D seg.fib$Accgrade = factor(seg.fib$Accgrade,
##D levels=c("accnote<7","7-8accnote","accnote>8"), ordered=T)
##D seg.fib$Grade = factor(seg.fib$Grade,
##D levels=c("<6.5note","6.5-7note","7-7.5note",">7.5note"), ordered=T)
##D
##D # Pathmox Analysis
##D fib.pathmox=pls.pathmox(pls.fib,seg.fib,signif=0.05,
##D deep=2,size=0.2,n.node=20)
##D
##D # plot pathmox tree
##D plot(pls.fib)
##D
## End(Not run)
# Runnable example for plot.xtree.pls: fit a PLS path model on a subset of the
# fibtele alumni-satisfaction data, grow a Pathmox segmentation tree, and plot it.
library(genpathmox)
data(fibtele)
# select manifest variables (first 50 respondents only, to keep the example fast)
data.fib <-fibtele[1:50,12:35]
# define inner model matrix (rows receive paths from the columns marked 1)
Image = rep(0,5)
Qual.spec = rep(0,5)
Qual.gen = rep(0,5)
Value = c(1,1,1,0,0)
Satis = c(1,1,1,1,0)
inner.fib = rbind(Image,Qual.spec, Qual.gen, Value, Satis)
colnames(inner.fib) = rownames(inner.fib)
# blocks of indicators (outer model): column ranges of data.fib per construct
outer.fib = list(1:8,9:11,12:16,17:20,21:24)
modes.fib = rep("A", 5)
# apply plspm
# NOTE(review): plspm() belongs to the plspm package, which is not attached
# here — confirm genpathmox loads it as a dependency before running.
pls.fib = plspm(data.fib, inner.fib, outer.fib, modes.fib)
# re-ordering those segmentation variables with ordinal scale
seg.fib = fibtele[1:50,c(2,7)]
seg.fib$Salary = factor(seg.fib$Salary,
levels=c("<18k","25k","35k","45k",">45k"), ordered=TRUE)
# Pathmox Analysis
# NOTE(review): signif = 0.5 and size = 0.01 are far looser than the
# commented-out original (0.05 / 0.2) — presumably relaxed so the small
# 50-row example still produces a split; verify before reuse.
fib.pathmox = pls.pathmox(pls.fib,seg.fib,signif=0.5,
deep=1,size=0.01,n.node=10)
# plot the resulting pathmox tree
plot(fib.pathmox)
|
/data/genthat_extracted_code/genpathmox/examples/plot.xtree.pls.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 2,362
|
r
|
library(genpathmox)
### Name: plot.xtree.pls
### Title: Plot function for the Pathmox Segmentation Trees: PLS-PM
### Aliases: plot.xtree.pls
### ** Examples
## Not run:
##D ## example of PLS-PM in alumni satisfaction
##D
##D # select manifest variables
##D data.fib <-fibtele[,12:35]
##D
##D # define inner model matrix
##D Image = rep(0,5)
##D Qual.spec = rep(0,5)
##D Qual.gen = rep(0,5)
##D Value = c(1,1,1,0,0)
##D Satis = c(1,1,1,1,0)
##D inner.fib = rbind(Image,Qual.spec, Qual.gen, Value, Satis)
##D colnames(inner.fib) = rownames(inner.fib)
##D
##D # blocks of indicators (outer model)
##D outer.fib = list(1:8,9:11,12:16,17:20,21:24)
##D modes.fib = rep("A", 5)
##D
##D # apply plspm
##D pls.fib = plspm(data.fib, inner.fib, outer.fib, modes.fib)
##D
##D # re-ordering those segmentation variables with ordinal scale
##D seg.fib= fibtele[,2:11]
##D
##D seg.fib$Age = factor(seg.fib$Age, ordered=T)
##D seg.fib$Salary = factor(seg.fib$Salary,
##D levels=c("<18k","25k","35k","45k",">45k"), ordered=T)
##D seg.fib$Accgrade = factor(seg.fib$Accgrade,
##D levels=c("accnote<7","7-8accnote","accnote>8"), ordered=T)
##D seg.fib$Grade = factor(seg.fib$Grade,
##D levels=c("<6.5note","6.5-7note","7-7.5note",">7.5note"), ordered=T)
##D
##D # Pathmox Analysis
##D fib.pathmox=pls.pathmox(pls.fib,seg.fib,signif=0.05,
##D deep=2,size=0.2,n.node=20)
##D
##D # plot pathmox tree
##D plot(pls.fib)
##D
## End(Not run)
# Runnable example for plot.xtree.pls: fit a PLS path model on a subset of the
# fibtele alumni-satisfaction data, grow a Pathmox segmentation tree, and plot it.
library(genpathmox)
data(fibtele)
# select manifest variables (first 50 respondents only, to keep the example fast)
data.fib <-fibtele[1:50,12:35]
# define inner model matrix (rows receive paths from the columns marked 1)
Image = rep(0,5)
Qual.spec = rep(0,5)
Qual.gen = rep(0,5)
Value = c(1,1,1,0,0)
Satis = c(1,1,1,1,0)
inner.fib = rbind(Image,Qual.spec, Qual.gen, Value, Satis)
colnames(inner.fib) = rownames(inner.fib)
# blocks of indicators (outer model): column ranges of data.fib per construct
outer.fib = list(1:8,9:11,12:16,17:20,21:24)
modes.fib = rep("A", 5)
# apply plspm
# NOTE(review): plspm() belongs to the plspm package, which is not attached
# here — confirm genpathmox loads it as a dependency before running.
pls.fib = plspm(data.fib, inner.fib, outer.fib, modes.fib)
# re-ordering those segmentation variables with ordinal scale
seg.fib = fibtele[1:50,c(2,7)]
seg.fib$Salary = factor(seg.fib$Salary,
levels=c("<18k","25k","35k","45k",">45k"), ordered=TRUE)
# Pathmox Analysis
# NOTE(review): signif = 0.5 and size = 0.01 are far looser than the
# commented-out original (0.05 / 0.2) — presumably relaxed so the small
# 50-row example still produces a split; verify before reuse.
fib.pathmox = pls.pathmox(pls.fib,seg.fib,signif=0.5,
deep=1,size=0.01,n.node=10)
# plot the resulting pathmox tree
plot(fib.pathmox)
|
################################################################
##     Univariate Properties for standardized data            ##
##                 J Di  02/12/2018                           ##
################################################################
# Refactored: the four sections below shared identical setup code
# (setwd / load / library / TIME axis labels) interleaved with
# rm(list = ls()) calls; the shared setup is now done once and each
# PDF is produced by plot_three(). Output files are unchanged.

setwd("~/Dropbox/Junrui Di/tensor analysis/GSVD/")
load("Data/hr50.rda")
library(qdap)
library(lubridate)
library(timeDate)
library(e1071)

# Hourly x-axis labels, "7:00" through "22:00".
TIME <- char2end(as.character(timeSequence(from = hm("7:00"), to = hm("22:59"),
                                           by = "hour")), char = " ", noc = 1)
TIME <- beg2char(TIME, ":", 2)

# p-th raw moment of a vector: mean(x^p).
moment_p <- function(x, p) {
  mean(x^p)
}

# Draw three stacked line plots (one per element of `values`) into one PDF,
# labelling the x axis with the hourly TIME labels.
plot_three <- function(file, values, titles) {
  pdf(file, width = 10, height = 10)
  par(mfrow = c(3, 1))
  for (i in seq_along(values)) {
    plot(values[[i]], main = titles[i], type = "l", xaxt = "n", ylab = "")
    axis(1, at = seq(1, 32, 2), labels = TIME)
  }
  dev.off()
}

# 1. Raw moments (no centering, no scaling).
Y <- scale(hr50[, -1], center = FALSE, scale = FALSE)
plot_three("Write Up/plots/univariate_moments.pdf",
           list(apply(Y, 2, moment_p, p = 2),
                apply(Y, 2, moment_p, p = 3),
                apply(Y, 2, moment_p, p = 4)),
           c("Ex^2", "Ex^3", "Ex^4"))

# 2. Central moments (column-centered data).
Y <- scale(hr50[, -1], center = TRUE, scale = FALSE)
plot_three("Write Up/plots/univariate_centermoments.pdf",
           list(apply(Y, 2, moment_p, p = 2),
                apply(Y, 2, moment_p, p = 3),
                apply(Y, 2, moment_p, p = 4)),
           c("E(x-mu)^2", "E(x-mu)^3", "E(x-mu)^4"))

# 3. Cumulants of the centered data: k2 = m2, k3 = m3, k4 = m4 - 3*m2^2.
cumulant_4 <- function(x) {
  moment_p(x, 4) - 3 * moment_p(x, 2)^2
}
plot_three("Write Up/plots/univariate_cumulant.pdf",
           list(apply(Y, 2, moment_p, p = 2),
                apply(Y, 2, moment_p, p = 3),
                apply(Y, 2, cumulant_4)),
           c("K2", "K3", "K4"))

# 4. Moments of scaled (unit RMS, uncentered) data.
Y <- scale(hr50[, -1], center = FALSE, scale = TRUE)
plot_three("Write Up/plots/univariate_sdmoment.pdf",
           list(apply(Y, 2, moment_p, p = 2),
                apply(Y, 2, moment_p, p = 3),
                apply(Y, 2, moment_p, p = 4)),
           c("sdM2", "sdM3", "sdM4"))
|
/univariate_2.R
|
no_license
|
junruidi/GSVD_Scripts
|
R
| false
| false
| 3,977
|
r
|
################################################################
##     Univariate Properties for standardized data            ##
##                 J Di  02/12/2018                           ##
################################################################
# Refactored: the four sections below shared identical setup code
# (setwd / load / library / TIME axis labels) interleaved with
# rm(list = ls()) calls; the shared setup is now done once and each
# PDF is produced by plot_three(). Output files are unchanged.

setwd("~/Dropbox/Junrui Di/tensor analysis/GSVD/")
load("Data/hr50.rda")
library(qdap)
library(lubridate)
library(timeDate)
library(e1071)

# Hourly x-axis labels, "7:00" through "22:00".
TIME <- char2end(as.character(timeSequence(from = hm("7:00"), to = hm("22:59"),
                                           by = "hour")), char = " ", noc = 1)
TIME <- beg2char(TIME, ":", 2)

# p-th raw moment of a vector: mean(x^p).
moment_p <- function(x, p) {
  mean(x^p)
}

# Draw three stacked line plots (one per element of `values`) into one PDF,
# labelling the x axis with the hourly TIME labels.
plot_three <- function(file, values, titles) {
  pdf(file, width = 10, height = 10)
  par(mfrow = c(3, 1))
  for (i in seq_along(values)) {
    plot(values[[i]], main = titles[i], type = "l", xaxt = "n", ylab = "")
    axis(1, at = seq(1, 32, 2), labels = TIME)
  }
  dev.off()
}

# 1. Raw moments (no centering, no scaling).
Y <- scale(hr50[, -1], center = FALSE, scale = FALSE)
plot_three("Write Up/plots/univariate_moments.pdf",
           list(apply(Y, 2, moment_p, p = 2),
                apply(Y, 2, moment_p, p = 3),
                apply(Y, 2, moment_p, p = 4)),
           c("Ex^2", "Ex^3", "Ex^4"))

# 2. Central moments (column-centered data).
Y <- scale(hr50[, -1], center = TRUE, scale = FALSE)
plot_three("Write Up/plots/univariate_centermoments.pdf",
           list(apply(Y, 2, moment_p, p = 2),
                apply(Y, 2, moment_p, p = 3),
                apply(Y, 2, moment_p, p = 4)),
           c("E(x-mu)^2", "E(x-mu)^3", "E(x-mu)^4"))

# 3. Cumulants of the centered data: k2 = m2, k3 = m3, k4 = m4 - 3*m2^2.
cumulant_4 <- function(x) {
  moment_p(x, 4) - 3 * moment_p(x, 2)^2
}
plot_three("Write Up/plots/univariate_cumulant.pdf",
           list(apply(Y, 2, moment_p, p = 2),
                apply(Y, 2, moment_p, p = 3),
                apply(Y, 2, cumulant_4)),
           c("K2", "K3", "K4"))

# 4. Moments of scaled (unit RMS, uncentered) data.
Y <- scale(hr50[, -1], center = FALSE, scale = TRUE)
plot_three("Write Up/plots/univariate_sdmoment.pdf",
           list(apply(Y, 2, moment_p, p = 2),
                apply(Y, 2, moment_p, p = 3),
                apply(Y, 2, moment_p, p = 4)),
           c("sdM2", "sdM3", "sdM4"))
|
# Generate plot no. 2: Global Active Power (kW) over time for
# 2007-02-01 and 2007-02-02, saved to plot2.png.
# The data-loading step is shared by all the plots in this assignment,
# but only needs to run once per session.

# Read the full data set, dropping rows with missing values ("?").
col_types <- c("character", "character", rep("numeric", 7))
data <- na.omit(read.table("./data/household_power_consumption.txt",
                           sep = ";", header = TRUE, na.strings = "?",
                           colClasses = col_types))
data$Date <- as.Date(data$Date, "%d/%m/%Y")

# Keep only the two days of interest.
date1 <- as.Date("2007-02-01")
date2 <- as.Date("2007-02-02")
data.selection <- data[(data$Date == date1) | (data$Date == date2), ]
# Fold the time back into Date so it can be parsed as a full timestamp.
data.selection$Date <- paste(data.selection$Date, data.selection$Time, sep = " ")

# Render the line plot of global active power over time.
png(filename = "plot2.png", width = 480, height = 480)
plot(strptime(data.selection$Date, format = "%Y-%m-%d %H:%M:%S"),
     data.selection$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
adrianbrustur/ExData_Plotting1
|
R
| false
| false
| 988
|
r
|
# Generate plot no. 2: Global Active Power (kW) over time for
# 2007-02-01 and 2007-02-02, saved to plot2.png.
# The data-loading step is shared by all the plots in this assignment,
# but only needs to run once per session.

# Read the full data set, dropping rows with missing values ("?").
col_types <- c("character", "character", rep("numeric", 7))
data <- na.omit(read.table("./data/household_power_consumption.txt",
                           sep = ";", header = TRUE, na.strings = "?",
                           colClasses = col_types))
data$Date <- as.Date(data$Date, "%d/%m/%Y")

# Keep only the two days of interest.
date1 <- as.Date("2007-02-01")
date2 <- as.Date("2007-02-02")
data.selection <- data[(data$Date == date1) | (data$Date == date2), ]
# Fold the time back into Date so it can be parsed as a full timestamp.
data.selection$Date <- paste(data.selection$Date, data.selection$Time, sep = " ")

# Render the line plot of global active power over time.
png(filename = "plot2.png", width = 480, height = 480)
plot(strptime(data.selection$Date, format = "%Y-%m-%d %H:%M:%S"),
     data.selection$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
library(dplyr)
library(reshape2)
library(tidyr)

# Home-favorite win/loss counts by moneyline bucket for the 2018 season,
# written to wl18.csv.
load(file = paste0(getwd(), "/vegas2018.Rdata"))  # loads bb.data18
bb <- bb.data18

# A home favorite (moneyline below +100) counts as a win only when the
# home side actually won; everything else is an L.
bb$fav.out <- ifelse(bb$home.ML < 100 & bb$home.win == 'W', 'W', 'L')
# Bucket the moneyline into groups of 10.
bb$home.ML.grp <- round(as.integer(bb$home.ML) / 10)

# Fix: the original pipeline ended in as.data.frame(tally()), which passed a
# no-argument tally() call as a second argument instead of running it on the
# grouped data, so the group counts were never computed. Count per group,
# then drop the grouping.
bb1 <- bb %>%
  select(home.ML.grp, fav.out) %>%
  group_by(home.ML.grp, fav.out) %>%
  tally() %>%
  as.data.frame()

# One row per ML bucket, with W and L counts spread into columns
# (dcast defaults to the remaining column, n, as the value).
bb1 <- dcast(bb1, home.ML.grp ~ fav.out)
write.csv(bb1, "wl18.csv")
|
/v 2020 a1.R
|
no_license
|
maximize22/bball2018
|
R
| false
| false
| 434
|
r
|
library(dplyr)
library(reshape2)
library(tidyr)

# Home-favorite win/loss counts by moneyline bucket for the 2018 season,
# written to wl18.csv.
load(file = paste0(getwd(), "/vegas2018.Rdata"))  # loads bb.data18
bb <- bb.data18

# A home favorite (moneyline below +100) counts as a win only when the
# home side actually won; everything else is an L.
bb$fav.out <- ifelse(bb$home.ML < 100 & bb$home.win == 'W', 'W', 'L')
# Bucket the moneyline into groups of 10.
bb$home.ML.grp <- round(as.integer(bb$home.ML) / 10)

# Fix: the original pipeline ended in as.data.frame(tally()), which passed a
# no-argument tally() call as a second argument instead of running it on the
# grouped data, so the group counts were never computed. Count per group,
# then drop the grouping.
bb1 <- bb %>%
  select(home.ML.grp, fav.out) %>%
  group_by(home.ML.grp, fav.out) %>%
  tally() %>%
  as.data.frame()

# One row per ML bucket, with W and L counts spread into columns
# (dcast defaults to the remaining column, n, as the value).
bb1 <- dcast(bb1, home.ML.grp ~ fav.out)
write.csv(bb1, "wl18.csv")
|
######################################################################
# SCRIPT THAT FILTERS EXPRESSION DATA
# Author: Andrés Rabinovich, based on a script by Ariel Chernomoretz
# Created: 01/06/2018
# Last modified: XX/XX/XXXX (by XXX)
######################################################################
# Required libraries
# NOTE(review): require() returns FALSE instead of erroring when the package
# is missing; library(edgeR) would fail fast — confirm before changing.
require(edgeR)
# Choose the working directory
setwd("/home/arabinov/doctorado/programacion/redes_mixtas/")
# Load the count data (prints the loaded object names)
(load("pipeline_archivos/cuentas.Rdata"))
# Load the conditions for which the network will be generated
(load("pipeline_archivos/1_seleccion_de_condiciones.Rdata"))
# Keep only the count columns for the selected temperature plus the
# reference temperature (matched by the "at_<temp>_" prefix in column names)
iTemp <- grep(paste0("at_",temperatura,"_"),colnames(cuentas_genes))
iTemp <- c(iTemp, grep(paste0("at_",temperatura_referencia,"_"),colnames(cuentas_genes)))
# Filter the counts. A gene is considered expressed if it received on average
# more than a minimum number of reads (parameter minGenReads, default = 10)
# and if the average read density per condition exceeds a threshold
# (parameter minRds, default = 0.05).
# NOTE(review): the original comment said the density threshold must hold "in
# at least one condition", but the code below tests the MEAN density across
# the selected samples — confirm which behavior is intended.
i1 <- apply(cuentas_genes[,iTemp],1,function(x){mean(x) > 10})
i22 <- apply(cuentas_genes[,iTemp]/cuentas_genes[,"effective_length"],1,function(x){mean(x) > 0.05})
ipass <- i1 & i22
# Apply the filter: keep passing genes, the first 9 annotation columns,
# and the selected sample columns
cuentas_genes <- cuentas_genes[ipass, c(1:9, iTemp)]
# data.frame describing the experimental conditions
# NOTE(review): seq(1:12) is equivalent to 1:12 here (seq of a vector returns
# along it) — left unchanged to keep the code byte-identical.
phenotype<-data.frame(condition = c(paste0(temperatura_referencia, ".T", rep(seq(1:12), each=2)),
paste0(temperatura, ".T", rep(seq(1:12), each=2)) ),
temperaturas = rep(c(temperatura_referencia, temperatura), each=24),
tiempos = rep(1:12, each=2, times=2)
)
# Order the factor levels of the experimental conditions as they appear
phenotype$condition <- factor(phenotype$condition,levels=unique(phenotype$condition))
phenotype$tiempos <- factor(phenotype$tiempos,levels=unique(phenotype$tiempos))
phenotype$temperaturas <- factor(phenotype$temperaturas,levels=unique(phenotype$temperaturas))
rownames(phenotype) <- paste0("at_", paste(rep(c(temperatura_referencia, temperatura), each=24),
paste(rep(1:12, each=2), c("A", "B"), sep="_"),
sep="_"))
#Arma el objeto DGEList con las cuentas de los bines y de los genes y las condiciones
y <- DGEList(counts=cuentas_genes[,10:ncol(cuentas_genes)], group = phenotype$condition, genes = cuentas_genes[,1:9])
#Calcula el factor de normalización para escalar los tamaños de las librerías
y <- calcNormFactors(y, method=c("TMM","RLE","upperquartile")[1])
#El diseño experimental es cada condición contra la referencia, que elegimos por defecto sea T22.
#Entonces, para decidir si un bin se expresó diferencialmente, se compara su expresión en cada tiempo contra la
#del mismo tiempo en T22
design <- model.matrix(~condition + 0, data=phenotype)
#Para fitear una binomial negativa se necesita estimar el parámetro de dispersión de la binomial negativa (1/r, con r
#la cantidad de veces que tiene que fallar la binomial negativa).
y <- estimateDisp(y, design)
#Se ajusta con el modelo de binomial negativa las cuentas de los bines y se los compara con la referencia.
#En realidad el ajuste devuelve los beta que mejor ajustan y como es un glm los beta son logs. La resta del beta contra
#el beta de la referencia entonces es un ratio entre betas y eso es lo que termina siendo la comparación.
#Además, modificamos la función glmFit para que devuelva el error estandar. Necesitamos el error estandar para poder
#reducir los coeficientes que vienen de comparaciones con muy pocas cuentas y que por eso se disparan como si estuvieran
#cambiando mucho pero en realidad no cambian sino que están en cero y es el gen el que cambia.
fit <- glmFit(y, design)
#Ajustamos por glmLRT para encontrar que genes se encuentran diferencialmente expresados entre las dos condiciones
lfchange_genes <- matrix(NA,ncol=(0.5*ncol(fit$coef)),nrow=nrow(fit$coef))
rownames(lfchange_genes) <- rownames(fit$coef)
colnames(lfchange_genes) <- paste("condicion", paste(temperatura_referencia,temperatura, sep="."), 1:12, sep=".")
pvalues_genes <- lfchange_genes
for(i in 1:(0.5*ncol(fit$coef))){
#Generamos los contrastes
contrastes <- rep(0, 24)
contrastes[i] <- -1
contrastes[i + 12] <- 1
cat("===============================================\nEvaluando contrastes",
paste(paste0(contrastes[1:12], collapse = ""), paste0(contrastes[13:24], collapse = ""), sep="|")
,"\n")
ds <- glmLRT(fit, contrast = contrastes)
genname <- rownames(ds$genes)
lfchange_genes[genname, i] <- ds$table$logFC
pvalues_genes[genname, i] <- ds$table$PValue
}
lfchange_genes <- lfchange_genes[!apply(lfchange_genes, 1, function(x){any(is.na(x))}), ]
pvalues_genes <- pvalues_genes[!apply(pvalues_genes, 1, function(x){any(is.na(x))}), ]
#ajusto TODOS los pv
qvalues_genes <- matrix(p.adjust(pvalues_genes,"fdr"), ncol=ncol(pvalues_genes), byrow=FALSE)
rownames(qvalues_genes) <- rownames(pvalues_genes)
#data.frame con las condiciones experimentales.
phenotype<-data.frame(condition = c(paste0("T", temperatura, ".t", rep(seq(1:12), each=2))),
temperaturas = rep(c(temperatura), each=24),
tiempos = rep(1:12, each=2)
)
#Ordena los niveles de las condiciones experimentales
phenotype$condition <- factor(phenotype$condition,levels=unique(phenotype$condition))
phenotype$tiempos <- factor(phenotype$tiempos,levels=unique(phenotype$tiempos))
phenotype$temperaturas <- factor(phenotype$temperaturas,levels=unique(phenotype$temperaturas))
rownames(phenotype) <- paste0("at_", paste(rep(c(temperatura), each=24),
paste(rep(1:12, each=2), c("A", "B"), sep="_"),
sep="_"))
genes_crudos <- cuentas_genes[, grep(paste0("at_", temperatura), colnames(cuentas_genes))]
#Fiteamos las cuentas de los genes por condición para temperatura
design <- model.matrix(~condition + 0, data=phenotype)
#Arma el objeto DGEList con las cuentas de los bines y de los genes y las condiciones
y_genes <- DGEList(counts=genes_crudos, group = phenotype$condition, genes = genes_crudos[,1:8])
#Calcula el factor de normalización para escalar los tamaños de las librerías
y_genes <- calcNormFactors(y_genes, method=c("TMM","RLE","upperquartile")[1])
#Para fitear una binomial negativa se necesita estimar el parámetro de dispersión de la binomial negativa (1/r, con r
#la cantidad de veces que tiene que fallar la binomial negativa).
y_genes <- estimateDisp(y_genes, design)
#Ajustamos los genes y usamos los coeficientes del ajuste como perfiles
fit_genes <- glmFit(y_genes, design)
#Pedimos cambios temporales en los perfiles
pvalues_tiempo <- matrix(0, ncol=11, nrow=nrow(fit_genes$coefficients))
rownames(pvalues_tiempo) <- rownames(fit_genes$coefficients)
lfchange_tiempo <- pvalues_tiempo
for(i in 2:12){
ds <- glmLRT(fit_genes, coef = i)
pvalues_tiempo[, i-1] <- ds$table$PValue
lfchange_tiempo[, i-1] <- ds$table$logFC
}
lfchange_tiempo <- lfchange_tiempo[!apply(lfchange_tiempo, 1, function(x){any(is.na(x))}), ]
pvalues_tiempo <- pvalues_tiempo[!apply(pvalues_tiempo, 1, function(x){any(is.na(x))}), ]
#ajusto TODOS los pv
qvalues_tiempo <- matrix(p.adjust(pvalues_tiempo,"fdr"), ncol=ncol(pvalues_tiempo), byrow=FALSE)
rownames(qvalues_tiempo) <- rownames(pvalues_tiempo)
#Guarda las cuentas de los genes y los cambios (log fold change) entre condiciones
perfiles_genes <- fit_genes$coefficients
save(lfchange_genes, qvalues_genes, lfchange_tiempo, qvalues_tiempo, perfiles_genes, cuentas_genes, file="pipeline_archivos/2_genes_prefiltrados.Rdata")
|
/pipeline/2_prefiltrado_genes.R
|
no_license
|
andresrabinovich/redes_mixtas
|
R
| false
| false
| 8,031
|
r
|
######################################################################
#SCRIPT QUE FILTRA DATOS DE EXPRESION
#Autor: Andrés Rabinovich en base a un script de Ariel Chernomoretz
#Creación: 01/06/2018
#Última modificación: XX/XX/XXXX (por XXX)
######################################################################
#Librerías que necesita
require(edgeR)
#Elige directorio de trabajo
setwd("/home/arabinov/doctorado/programacion/redes_mixtas/")
#Levanta las cuentas
(load("pipeline_archivos/cuentas.Rdata"))
#Elegimos las condiciones para la que vamos a generar la red
(load("pipeline_archivos/1_seleccion_de_condiciones.Rdata"))
#Solo queremos las cuentas de la temperatura indicada
iTemp <- grep(paste0("at_",temperatura,"_"),colnames(cuentas_genes))
iTemp <- c(iTemp, grep(paste0("at_",temperatura_referencia,"_"),colnames(cuentas_genes)))
#Filtra las cuentas. Un gen se considera expresado si recibió en promedio más de un mínimo número de cuentas
#(parametro minGenReads, default=10) y si el average read density per condition supera un umbral en alguna de
#las condiciones (parametro minRds,default=0.05).
i1 <- apply(cuentas_genes[,iTemp],1,function(x){mean(x) > 10})
i22 <- apply(cuentas_genes[,iTemp]/cuentas_genes[,"effective_length"],1,function(x){mean(x) > 0.05})
ipass <- i1 & i22
#Filtramos las cuentas
cuentas_genes <- cuentas_genes[ipass, c(1:9, iTemp)]
#data.frame con las condiciones experimentales.
phenotype<-data.frame(condition = c(paste0(temperatura_referencia, ".T", rep(seq(1:12), each=2)),
paste0(temperatura, ".T", rep(seq(1:12), each=2)) ),
temperaturas = rep(c(temperatura_referencia, temperatura), each=24),
tiempos = rep(1:12, each=2, times=2)
)
#Ordena los niveles de las condiciones experimentales
phenotype$condition <- factor(phenotype$condition,levels=unique(phenotype$condition))
phenotype$tiempos <- factor(phenotype$tiempos,levels=unique(phenotype$tiempos))
phenotype$temperaturas <- factor(phenotype$temperaturas,levels=unique(phenotype$temperaturas))
rownames(phenotype) <- paste0("at_", paste(rep(c(temperatura_referencia, temperatura), each=24),
paste(rep(1:12, each=2), c("A", "B"), sep="_"),
sep="_"))
#Arma el objeto DGEList con las cuentas de los bines y de los genes y las condiciones
y <- DGEList(counts=cuentas_genes[,10:ncol(cuentas_genes)], group = phenotype$condition, genes = cuentas_genes[,1:9])
#Calcula el factor de normalización para escalar los tamaños de las librerías
y <- calcNormFactors(y, method=c("TMM","RLE","upperquartile")[1])
#El diseño experimental es cada condición contra la referencia, que elegimos por defecto sea T22.
#Entonces, para decidir si un bin se expresó diferencialmente, se compara su expresión en cada tiempo contra la
#del mismo tiempo en T22
design <- model.matrix(~condition + 0, data=phenotype)
#Para fitear una binomial negativa se necesita estimar el parámetro de dispersión de la binomial negativa (1/r, con r
#la cantidad de veces que tiene que fallar la binomial negativa).
y <- estimateDisp(y, design)
#Se ajusta con el modelo de binomial negativa las cuentas de los bines y se los compara con la referencia.
#En realidad el ajuste devuelve los beta que mejor ajustan y como es un glm los beta son logs. La resta del beta contra
#el beta de la referencia entonces es un ratio entre betas y eso es lo que termina siendo la comparación.
#Además, modificamos la función glmFit para que devuelva el error estandar. Necesitamos el error estandar para poder
#reducir los coeficientes que vienen de comparaciones con muy pocas cuentas y que por eso se disparan como si estuvieran
#cambiando mucho pero en realidad no cambian sino que están en cero y es el gen el que cambia.
fit <- glmFit(y, design)
#Ajustamos por glmLRT para encontrar que genes se encuentran diferencialmente expresados entre las dos condiciones
lfchange_genes <- matrix(NA,ncol=(0.5*ncol(fit$coef)),nrow=nrow(fit$coef))
rownames(lfchange_genes) <- rownames(fit$coef)
colnames(lfchange_genes) <- paste("condicion", paste(temperatura_referencia,temperatura, sep="."), 1:12, sep=".")
pvalues_genes <- lfchange_genes
for(i in 1:(0.5*ncol(fit$coef))){
#Generamos los contrastes
contrastes <- rep(0, 24)
contrastes[i] <- -1
contrastes[i + 12] <- 1
cat("===============================================\nEvaluando contrastes",
paste(paste0(contrastes[1:12], collapse = ""), paste0(contrastes[13:24], collapse = ""), sep="|")
,"\n")
ds <- glmLRT(fit, contrast = contrastes)
genname <- rownames(ds$genes)
lfchange_genes[genname, i] <- ds$table$logFC
pvalues_genes[genname, i] <- ds$table$PValue
}
lfchange_genes <- lfchange_genes[!apply(lfchange_genes, 1, function(x){any(is.na(x))}), ]
pvalues_genes <- pvalues_genes[!apply(pvalues_genes, 1, function(x){any(is.na(x))}), ]
#ajusto TODOS los pv
qvalues_genes <- matrix(p.adjust(pvalues_genes,"fdr"), ncol=ncol(pvalues_genes), byrow=FALSE)
rownames(qvalues_genes) <- rownames(pvalues_genes)
#data.frame con las condiciones experimentales.
phenotype<-data.frame(condition = c(paste0("T", temperatura, ".t", rep(seq(1:12), each=2))),
temperaturas = rep(c(temperatura), each=24),
tiempos = rep(1:12, each=2)
)
#Ordena los niveles de las condiciones experimentales
phenotype$condition <- factor(phenotype$condition,levels=unique(phenotype$condition))
phenotype$tiempos <- factor(phenotype$tiempos,levels=unique(phenotype$tiempos))
phenotype$temperaturas <- factor(phenotype$temperaturas,levels=unique(phenotype$temperaturas))
rownames(phenotype) <- paste0("at_", paste(rep(c(temperatura), each=24),
paste(rep(1:12, each=2), c("A", "B"), sep="_"),
sep="_"))
genes_crudos <- cuentas_genes[, grep(paste0("at_", temperatura), colnames(cuentas_genes))]
#Fiteamos las cuentas de los genes por condición para temperatura
design <- model.matrix(~condition + 0, data=phenotype)
#Arma el objeto DGEList con las cuentas de los bines y de los genes y las condiciones
y_genes <- DGEList(counts=genes_crudos, group = phenotype$condition, genes = genes_crudos[,1:8])
#Calcula el factor de normalización para escalar los tamaños de las librerías
y_genes <- calcNormFactors(y_genes, method=c("TMM","RLE","upperquartile")[1])
#Para fitear una binomial negativa se necesita estimar el parámetro de dispersión de la binomial negativa (1/r, con r
#la cantidad de veces que tiene que fallar la binomial negativa).
y_genes <- estimateDisp(y_genes, design)
#Ajustamos los genes y usamos los coeficientes del ajuste como perfiles
fit_genes <- glmFit(y_genes, design)
#Pedimos cambios temporales en los perfiles
pvalues_tiempo <- matrix(0, ncol=11, nrow=nrow(fit_genes$coefficients))
rownames(pvalues_tiempo) <- rownames(fit_genes$coefficients)
lfchange_tiempo <- pvalues_tiempo
for(i in 2:12){
ds <- glmLRT(fit_genes, coef = i)
pvalues_tiempo[, i-1] <- ds$table$PValue
lfchange_tiempo[, i-1] <- ds$table$logFC
}
lfchange_tiempo <- lfchange_tiempo[!apply(lfchange_tiempo, 1, function(x){any(is.na(x))}), ]
pvalues_tiempo <- pvalues_tiempo[!apply(pvalues_tiempo, 1, function(x){any(is.na(x))}), ]
#ajusto TODOS los pv
qvalues_tiempo <- matrix(p.adjust(pvalues_tiempo,"fdr"), ncol=ncol(pvalues_tiempo), byrow=FALSE)
rownames(qvalues_tiempo) <- rownames(pvalues_tiempo)
#Guarda las cuentas de los genes y los cambios (log fold change) entre condiciones
perfiles_genes <- fit_genes$coefficients
save(lfchange_genes, qvalues_genes, lfchange_tiempo, qvalues_tiempo, perfiles_genes, cuentas_genes, file="pipeline_archivos/2_genes_prefiltrados.Rdata")
|
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 6.4776117874557e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result)
|
/dann/inst/testfiles/calc_distance_C/AFL_calc_distance_C/calc_distance_C_valgrind_files/1609869297-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 1,198
|
r
|
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 6.4776117874557e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result)
|
fullData <- read.delim("household_power_consumption.txt",sep=";", stringsAsFactors=FALSE)
fullData$dateTime <- strptime(paste(fullData$Date, fullData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
plotData <- fullData[as.Date(fullData$dateTime) == as.Date("2007-02-02") | as.Date(fullData$dateTime) == as.Date("2007-02-01"),]
par(mfrow=c(1,1))
hist(as.numeric(plotData$Global_active_power), col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)", cex.axis=0.75, cex.lab=0.75)
dev.copy(png, "plot1.png", width=480, height=480, units="px")
dev.off()
|
/plot1.R
|
no_license
|
mpuittinen/ExData_Plotting1
|
R
| false
| false
| 560
|
r
|
fullData <- read.delim("household_power_consumption.txt",sep=";", stringsAsFactors=FALSE)
fullData$dateTime <- strptime(paste(fullData$Date, fullData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
plotData <- fullData[as.Date(fullData$dateTime) == as.Date("2007-02-02") | as.Date(fullData$dateTime) == as.Date("2007-02-01"),]
par(mfrow=c(1,1))
hist(as.numeric(plotData$Global_active_power), col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)", cex.axis=0.75, cex.lab=0.75)
dev.copy(png, "plot1.png", width=480, height=480, units="px")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{addforecast}
\alias{addforecast}
\alias{addforecast.samforecast}
\title{SAM add forecasts}
\usage{
addforecast(fit, what, dotcol = "black", dotpch = 19, dotcex = 1.5,
intervalcol = gray(0.5, alpha = 0.5), ...)
\method{addforecast}{samforecast}(fit, what, dotcol = "black", dotpch = 19,
dotcex = 1.5, intervalcol = gray(0.5, alpha = 0.5), ...)
}
\arguments{
\item{fit}{the object returned from sam.fit}
\item{what}{what to plot}
\item{dotcol}{color for dot}
\item{dotpch}{pch for dot}
\item{dotcex}{cex for dot}
\item{intervalcol}{color for interval}
\item{...}{extra arguments not currently used}
}
\description{
SAM add forecasts
}
\details{
internal plotting fun
}
|
/stockassessment/man/addforecast.Rd
|
no_license
|
jimianelli/SAM
|
R
| false
| true
| 769
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{addforecast}
\alias{addforecast}
\alias{addforecast.samforecast}
\title{SAM add forecasts}
\usage{
addforecast(fit, what, dotcol = "black", dotpch = 19, dotcex = 1.5,
intervalcol = gray(0.5, alpha = 0.5), ...)
\method{addforecast}{samforecast}(fit, what, dotcol = "black", dotpch = 19,
dotcex = 1.5, intervalcol = gray(0.5, alpha = 0.5), ...)
}
\arguments{
\item{fit}{the object returned from sam.fit}
\item{what}{what to plot}
\item{dotcol}{color for dot}
\item{dotpch}{pch for dot}
\item{dotcex}{cex for dot}
\item{intervalcol}{color for interval}
\item{...}{extra arguments not currently used}
}
\description{
SAM add forecasts
}
\details{
internal plotting fun
}
|
sortResults <- function( pbatObj, sortBy=NULL ) {
## I'm guessing Christoph titles this differently sometimes?
if( is.null(sortBy) ) {
guess <- c("powerFBAT")
}else{
guess <- sortBy
}
for( g in guess ) {
wh <- which( g == names(pbatObj$results) )
if( length(wh)>0 ) {
pbatObj$results <- pbatObj$results[order(pbatObj$results[,wh], decreasing=TRUE),]
return(pbatObj)
}
}
stop( "pbat data could not be sorted. set the 'sortBy' option to the name that corresponds to the conditional power estimate you wish to use." )
}
top <- function( pbatObj, n=10, sortBy=NULL ) {
if( class(pbatObj)[1]!="pbat" )
stop( "Object must be of class 'pbat', i.e. a result of 'pbat.m(...)'." )
pbatObj <- sortResults( pbatObj, sortBy=sortBy )
if( n<1 || n>nrow(pbatObj$results) )
n <- nrow( pbatObj$results )
return( pbatObj$results[1:n,] )
}
|
/R/top.R
|
no_license
|
cran/pbatR
|
R
| false
| false
| 889
|
r
|
sortResults <- function( pbatObj, sortBy=NULL ) {
## I'm guessing Christoph titles this differently sometimes?
if( is.null(sortBy) ) {
guess <- c("powerFBAT")
}else{
guess <- sortBy
}
for( g in guess ) {
wh <- which( g == names(pbatObj$results) )
if( length(wh)>0 ) {
pbatObj$results <- pbatObj$results[order(pbatObj$results[,wh], decreasing=TRUE),]
return(pbatObj)
}
}
stop( "pbat data could not be sorted. set the 'sortBy' option to the name that corresponds to the conditional power estimate you wish to use." )
}
top <- function( pbatObj, n=10, sortBy=NULL ) {
if( class(pbatObj)[1]!="pbat" )
stop( "Object must be of class 'pbat', i.e. a result of 'pbat.m(...)'." )
pbatObj <- sortResults( pbatObj, sortBy=sortBy )
if( n<1 || n>nrow(pbatObj$results) )
n <- nrow( pbatObj$results )
return( pbatObj$results[1:n,] )
}
|
test_that("Test that stan_prior accepts supported prior classes", {
prior <- prior_frame(prior = "uniform(0,10)", class = "b")
expect_equal(stan_prior(class = "b", coef = "x1", prior = prior),
" b ~ uniform(0,10); \n")
prior <- prior_frame(prior = c("uniform(0,10)", "normal(0,1)"),
class = "b", coef = c("", "x1"))
expect_equal(stan_prior(class = "b", coef = c("x1","x2"), prior = prior),
" b[1] ~ normal(0,1); \n b[2] ~ uniform(0,10); \n")
expect_equal(stan_prior("ar", prior = prior_frame("uniform(0,1)", class = "ar")),
" ar ~ uniform(0,1); \n")
expect_equal(stan_prior("ma", prior = prior_frame("normal(0,5)", class = "ma")),
" ma ~ normal(0,5); \n")
prior <- prior_frame("lkj_corr_cholesky(2)", class = "rescor")
expect_equal(stan_prior("rescor", prior = prior),
" rescor ~ lkj_corr_cholesky(2); \n")
})
test_that("Test that stan_prior returns the correct indices", {
prior <- prior_frame(prior = c("cauchy(0,5)", "normal(0,1)", "normal(0,1)"),
class = c("sd", "sd", "bp"), coef = c("", "x2", "z"))
expect_equal(stan_prior(class = "sd", coef = "Intercept", prior = prior),
" sd ~ cauchy(0,5); \n")
expect_equal(stan_prior(class = "sd", coef = c("x1", "x2"), prior = prior),
" sd[1] ~ cauchy(0,5); \n sd[2] ~ normal(0,1); \n")
expect_equal(stan_prior("bp", coef = "z", prior = prior),
" bp[1] ~ normal(0,1); \n")
})
test_that("Test that stan_prior can remove default priors", {
prior <- prior_frame(prior = "", class = c("sigma", "sd", "shape"),
group = c("", "g", ""))
expect_equal(stan_prior("sigma", prior = prior), "")
expect_equal(stan_prior("sd", group = "g", prior = prior), "")
expect_equal(stan_prior("shape", prior = prior), "")
})
test_that("Test that stan_eta returns correct strings for autocorrelation models", {
expect_match(stan_eta(family = "poisson", link = "log", f = c("Trt_c"),
autocor = cor.arma(~visit|patient, p=1))$transC1,
"eta <- X * b + b_Intercept + Yar * ar", fixed = TRUE)
expect_match(stan_eta(family = "poisson", link = "log", f = c("Trt_c"),
autocor = cor.arma(~visit|patient, q=1))$transC2,
"eta[n] <- eta[n] + Ema[n] * ma", fixed = TRUE)
})
test_that("Test_that stan_ma returns correct strings (or errors) for moving average models", {
expect_equal(stan_ma(family = "gaussian", link = "log", autocor = cor.arma()), list())
expect_match(stan_ma(family = "gaussian", link = "log", autocor = cor.arma(~visit|patient, q=1))$transC2,
"Ema[n + 1, i] <- e[n + 1 - i]", fixed = TRUE)
expect_match(stan_ma(family = "multinormal", link = "inverse", autocor = cor.arma(~visit|patient, q=1))$transC2,
"e[n] <- inv(Y[m, k]) - eta[n]", fixed = TRUE)
expect_error(stan_ma(family = "poisson", link = "log", autocor = cor.arma(~visit|patient, p=1, q=1)),
"moving-average models for family poisson are not yet implemented")
})
test_that("Test that stan_model accepts supported links", {
expect_match(stan_model(rating ~ treat + period + carry, data = inhaler,
family = "sratio", link = "probit_approx"), "Phi_approx")
expect_match(stan_model(rating ~ treat + period + carry, data = inhaler,
family = "cumulative", link = "probit"), "Phi")
expect_match(stan_model(rating ~ treat + period + carry, data = inhaler,
family = "poisson", link = "log"), "log")
})
test_that("Test that stan_model returns correct strings for customized covariances", {
expect_match(stan_model(rating ~ treat + period + carry + (1|subject), data = inhaler,
cov.ranef = list(subject = 1)), fixed = TRUE,
"r_1 <- sd_1 * (cov_1 * pre_1)")
expect_match(stan_model(rating ~ treat + period + carry + (1+carry|subject), data = inhaler,
cov.ranef = list(subject = 1)), fixed = TRUE,
paste0("r_1 <- to_array(kronecker_cholesky(cov_1, L_1, sd_1) * ",
"to_vector(pre_1), N_1, K_1"))
expect_match(stan_model(rating ~ treat + period + carry + (1+carry||subject), data = inhaler,
cov.ranef = list(subject = 1)), fixed = TRUE,
paste0("r_1 <- to_array(to_vector(rep_matrix(sd_1, N_1)) .* ",
"(cov_1 * to_vector(pre_1)), N_1, K_1)"))
})
test_that("Test that stan_model handles addition arguments correctly", {
expect_match(stan_model(time | cens(censored) ~ age + sex + disease, data = kidney,
family = "weibull", link = "log"), "vector[N] cens;", fixed = TRUE)
expect_match(stan_model(time | trunc(0) ~ age + sex + disease, data = kidney,
family = "gamma", link = "log"), "T[lb, ];", fixed = TRUE)
expect_match(stan_model(time | trunc(ub = 100) ~ age + sex + disease, data = kidney,
family = "cauchy", link = "log"), "T[, ub];", fixed = TRUE)
expect_match(stan_model(count | trunc(0, 150) ~ Trt_c, data = epilepsy,
family = "poisson", link = "log"), "T[lb, ub];", fixed = TRUE)
})
test_that("Test that stan_model correctly combines strings of multiple grouping factors", {
expect_match(stan_model(count ~ (1|patient) + (1+Trt_c|visit),
data = epilepsy, family = "poisson", link = "log"),
" real Z_1[N]; # RE design matrix \n # data for random effects of visit \n",
fixed = TRUE)
expect_match(stan_model(count ~ (1|visit) + (1+Trt_c|patient),
data = epilepsy, family = "poisson", link = "log"),
" int NC_1; # number of correlations \n # data for random effects of visit \n",
fixed = TRUE)
})
test_that("Test that stan_ordinal returns correct strings", {
expect_match(stan_ordinal(family = "sratio", link = "logit")$par, "")
})
test_that("Test that stan_llh uses simplifications when possible", {
expect_equal(stan_llh(family = "bernoulli", link = "logit"), " Y ~ bernoulli_logit(eta); \n")
expect_equal(stan_llh(family = "gaussian", link = "log"), " Y ~ lognormal(eta, sigma); \n")
expect_match(stan_llh(family = "gaussian", link = "log", weights = TRUE),
"lognormal_log(Y[n], eta[n], sigma); \n", fixed = TRUE)
expect_equal(stan_llh(family = "poisson", link = "log"), " Y ~ poisson_log(eta); \n")
expect_match(stan_llh(family = "cumulative", link = "logit"), fixed = TRUE,
" Y[n] ~ ordered_logistic(eta[n], b_Intercept); \n")
})
test_that("Test that stan_llh returns correct llhs under weights and censoring", {
expect_equal(stan_llh(family = "cauchy", link = "inverse", weights = TRUE),
" lp_pre[n] <- cauchy_log(Y[n], eta[n], sigma); \n")
expect_equal(stan_llh(family = "poisson", link = "log", weights = TRUE),
" lp_pre[n] <- poisson_log_log(Y[n], eta[n]); \n")
expect_match(stan_llh(family = "poisson", link = "log", cens = TRUE),
"Y[n] ~ poisson(exp(eta[n])); \n", fixed = TRUE)
expect_equal(stan_llh(family = "binomial", link = "logit", add = TRUE, weights = TRUE),
" lp_pre[n] <- binomial_logit_log(Y[n], trials[n], eta[n]); \n")
expect_match(stan_llh(family = "weibull", link = "inverse", cens = TRUE), fixed = TRUE,
"increment_log_prob(weibull_ccdf_log(Y[n], shape, eta[n])); \n")
expect_match(stan_llh(family = "weibull", link = "inverse", cens = TRUE, weights = TRUE), fixed = TRUE,
"increment_log_prob(weights[n] * weibull_ccdf_log(Y[n], shape, eta[n])); \n")
})
test_that("Test that stan_rngprior returns correct sampling statements for priors", {
c1 <- " # parameters to store prior samples \n"
c2 <- " # additionally draw samples from priors \n"
expect_equal(stan_rngprior(TRUE, prior = "nu ~ uniform(0,100); \n"),
list(par = paste0(c1," real<lower=0> prior_nu; \n"),
model = paste0(c2," prior_nu ~ uniform(0,100); \n")))
expect_equal(stan_rngprior(TRUE, prior = "delta ~ normal(0,1); \n", family = "cumulative"),
list(par = paste0(c1," real<lower=0> prior_delta; \n"),
model = paste0(c2," prior_delta ~ normal(0,1); \n")))
expect_equal(stan_rngprior(TRUE, prior = "b ~ normal(0,5); \n"),
list(genD = " real prior_b; \n",
genC = paste0(c2," prior_b <- normal_rng(0,5); \n")))
expect_equal(stan_rngprior(TRUE, prior = "b[1] ~ normal(0,5); \n"),
list(genD = " real prior_b_1; \n",
genC = paste0(c2," prior_b_1 <- normal_rng(0,5); \n")))
expect_equal(stan_rngprior(TRUE, prior = "bp[1] ~ normal(0,5); \n"),
list(genD = " real prior_bp_1; \n",
genC = paste0(c2," prior_bp_1 <- normal_rng(0,5); \n")))
expect_equal(stan_rngprior(TRUE, prior = "sigma[2] ~ normal(0,5); \n"),
list(par = paste0(c1," real<lower=0> prior_sigma_2; \n"),
model = paste0(c2," prior_sigma_2 ~ normal(0,5); \n")))
expect_equal(stan_rngprior(TRUE, prior = "sd_1[1] ~ normal(0,5); \n sd_1[2] ~ cauchy(0,2); \n"),
list(par = paste0(c1," real<lower=0> prior_sd_1_1; \n real<lower=0> prior_sd_1_2; \n"),
model = paste0(c2," prior_sd_1_1 ~ normal(0,5); \n prior_sd_1_2 ~ cauchy(0,2); \n")))
})
test_that("Test that stan_functions returns correct user defined functions", {
expect_match(stan_model(rating ~ treat + period + carry + (1+carry|subject), data = inhaler,
cov.ranef = list(subject = 1)), "matrix kronecker_cholesky.*vector\\[\\] to_array")
})
|
/tests/testthat/tests.stan.R
|
no_license
|
BERENZ/brms
|
R
| false
| false
| 9,838
|
r
|
test_that("Test that stan_prior accepts supported prior classes", {
prior <- prior_frame(prior = "uniform(0,10)", class = "b")
expect_equal(stan_prior(class = "b", coef = "x1", prior = prior),
" b ~ uniform(0,10); \n")
prior <- prior_frame(prior = c("uniform(0,10)", "normal(0,1)"),
class = "b", coef = c("", "x1"))
expect_equal(stan_prior(class = "b", coef = c("x1","x2"), prior = prior),
" b[1] ~ normal(0,1); \n b[2] ~ uniform(0,10); \n")
expect_equal(stan_prior("ar", prior = prior_frame("uniform(0,1)", class = "ar")),
" ar ~ uniform(0,1); \n")
expect_equal(stan_prior("ma", prior = prior_frame("normal(0,5)", class = "ma")),
" ma ~ normal(0,5); \n")
prior <- prior_frame("lkj_corr_cholesky(2)", class = "rescor")
expect_equal(stan_prior("rescor", prior = prior),
" rescor ~ lkj_corr_cholesky(2); \n")
})
test_that("Test that stan_prior returns the correct indices", {
  # Coefficient-specific priors must be matched by position; coefficients
  # without their own entry fall back to the class-level prior.
  mixed_prior <- prior_frame(prior = c("cauchy(0,5)", "normal(0,1)", "normal(0,1)"),
                             class = c("sd", "sd", "bp"), coef = c("", "x2", "z"))
  # A single coefficient without its own prior: class-level prior, no index.
  expect_equal(stan_prior(class = "sd", coef = "Intercept", prior = mixed_prior),
               " sd ~ cauchy(0,5); \n")
  # Several coefficients: each one receives an indexed statement.
  expect_equal(stan_prior(class = "sd", coef = c("x1", "x2"), prior = mixed_prior),
               " sd[1] ~ cauchy(0,5); \n sd[2] ~ normal(0,1); \n")
  # A coefficient with its own prior keeps its index even when alone.
  expect_equal(stan_prior("bp", coef = "z", prior = mixed_prior),
               " bp[1] ~ normal(0,1); \n")
})
test_that("Test that stan_prior can remove default priors", {
  # An empty prior string signals that the default prior should be dropped,
  # so stan_prior() must emit nothing for each affected class.
  no_prior <- prior_frame(prior = "", class = c("sigma", "sd", "shape"),
                          group = c("", "g", ""))
  expect_equal(stan_prior("sigma", prior = no_prior), "")
  expect_equal(stan_prior("sd", group = "g", prior = no_prior), "")
  expect_equal(stan_prior("shape", prior = no_prior), "")
})
test_that("Test that stan_eta returns correct strings for autocorrelation models", {
  # AR(1): the autoregressive term enters the linear predictor directly
  # in the first transformed-parameters chunk.
  expect_match(stan_eta(family = "poisson", link = "log", f = c("Trt_c"),
                        autocor = cor.arma(~visit|patient, p=1))$transC1,
               "eta <- X * b + b_Intercept + Yar * ar", fixed = TRUE)
  # MA(1): the moving-average contribution is added observation-wise
  # in the second chunk, after eta has been initialized.
  expect_match(stan_eta(family = "poisson", link = "log", f = c("Trt_c"),
                        autocor = cor.arma(~visit|patient, q=1))$transC2,
               "eta[n] <- eta[n] + Ema[n] * ma", fixed = TRUE)
})
test_that("Test_that stan_ma returns correct strings (or errors) for moving average models", {
  # Without an MA structure nothing needs to be generated.
  expect_equal(stan_ma(family = "gaussian", link = "log", autocor = cor.arma()), list())
  # Gaussian MA(1): residuals e are shifted into the Ema design matrix.
  expect_match(stan_ma(family = "gaussian", link = "log", autocor = cor.arma(~visit|patient, q=1))$transC2,
               "Ema[n + 1, i] <- e[n + 1 - i]", fixed = TRUE)
  # Multinormal models compute residuals on the link scale (inverse link here).
  expect_match(stan_ma(family = "multinormal", link = "inverse", autocor = cor.arma(~visit|patient, q=1))$transC2,
               "e[n] <- inv(Y[m, k]) - eta[n]", fixed = TRUE)
  # Count families have no well-defined residuals, so MA terms must error.
  expect_error(stan_ma(family = "poisson", link = "log", autocor = cor.arma(~visit|patient, p=1, q=1)),
               "moving-average models for family poisson are not yet implemented")
})
test_that("Test that stan_model accepts supported links", {
  # Each supported link function must show up in the generated Stan code.
  probit_approx_code <- stan_model(rating ~ treat + period + carry,
                                   data = inhaler,
                                   family = "sratio", link = "probit_approx")
  expect_match(probit_approx_code, "Phi_approx")
  probit_code <- stan_model(rating ~ treat + period + carry,
                            data = inhaler,
                            family = "cumulative", link = "probit")
  expect_match(probit_code, "Phi")
  log_code <- stan_model(rating ~ treat + period + carry,
                         data = inhaler,
                         family = "poisson", link = "log")
  expect_match(log_code, "log")
})
test_that("Test that stan_model returns correct strings for customized covariances", {
  # Single random intercept: the user covariance factor scales the
  # unscaled (pre_1) effects directly.
  expect_match(stan_model(rating ~ treat + period + carry + (1|subject), data = inhaler,
                          cov.ranef = list(subject = 1)), fixed = TRUE,
               "r_1 <- sd_1 * (cov_1 * pre_1)")
  # Correlated random effects: requires the kronecker product of the user
  # covariance with the estimated cholesky factor L_1.
  expect_match(stan_model(rating ~ treat + period + carry + (1+carry|subject), data = inhaler,
                          cov.ranef = list(subject = 1)), fixed = TRUE,
               paste0("r_1 <- to_array(kronecker_cholesky(cov_1, L_1, sd_1) * ",
                      "to_vector(pre_1), N_1, K_1"))
  # Uncorrelated random effects (|| syntax): standard deviations are
  # applied elementwise instead of via a cholesky factor.
  expect_match(stan_model(rating ~ treat + period + carry + (1+carry||subject), data = inhaler,
                          cov.ranef = list(subject = 1)), fixed = TRUE,
               paste0("r_1 <- to_array(to_vector(rep_matrix(sd_1, N_1)) .* ",
                      "(cov_1 * to_vector(pre_1)), N_1, K_1)"))
})
test_that("Test that stan_model handles addition arguments correctly", {
  # cens(): the censoring indicator becomes a data vector in the Stan model.
  expect_match(stan_model(time | cens(censored) ~ age + sex + disease, data = kidney,
                          family = "weibull", link = "log"), "vector[N] cens;", fixed = TRUE)
  # trunc(): lower, upper, and two-sided truncation map onto Stan's
  # T[lb, ub] truncation syntax with the unused bound left empty.
  expect_match(stan_model(time | trunc(0) ~ age + sex + disease, data = kidney,
                          family = "gamma", link = "log"), "T[lb, ];", fixed = TRUE)
  expect_match(stan_model(time | trunc(ub = 100) ~ age + sex + disease, data = kidney,
                          family = "cauchy", link = "log"), "T[, ub];", fixed = TRUE)
  expect_match(stan_model(count | trunc(0, 150) ~ Trt_c, data = epilepsy,
                          family = "poisson", link = "log"), "T[lb, ub];", fixed = TRUE)
})
test_that("Test that stan_model correctly combines strings of multiple grouping factors", {
  # Grouping factors are numbered in order of appearance: the data block of
  # the first factor must be directly followed by the header of the second.
  expect_match(stan_model(count ~ (1|patient) + (1+Trt_c|visit),
                          data = epilepsy, family = "poisson", link = "log"),
               " real Z_1[N]; # RE design matrix \n # data for random effects of visit \n",
               fixed = TRUE)
  # Swapping the grouping terms swaps the generated sections accordingly.
  expect_match(stan_model(count ~ (1|visit) + (1+Trt_c|patient),
                          data = epilepsy, family = "poisson", link = "log"),
               " int NC_1; # number of correlations \n # data for random effects of visit \n",
               fixed = TRUE)
})
test_that("Test that stan_ordinal returns correct strings", {
  # The parameters block of a stopping-ratio (sratio) model must be
  # generated without error; matching "" only asserts it is a valid string.
  sratio_par <- stan_ordinal(family = "sratio", link = "logit")$par
  expect_match(sratio_par, "")
})
test_that("Test that stan_llh uses simplifications when possible", {
  # Compound distributions (bernoulli_logit, poisson_log) avoid applying
  # the link function explicitly and allow vectorized sampling statements.
  expect_equal(stan_llh(family = "bernoulli", link = "logit"), " Y ~ bernoulli_logit(eta); \n")
  # gaussian + log link simplifies to the lognormal distribution.
  expect_equal(stan_llh(family = "gaussian", link = "log"), " Y ~ lognormal(eta, sigma); \n")
  # With weights the log-density has to be accumulated observation-wise.
  expect_match(stan_llh(family = "gaussian", link = "log", weights = TRUE),
               "lognormal_log(Y[n], eta[n], sigma); \n", fixed = TRUE)
  expect_equal(stan_llh(family = "poisson", link = "log"), " Y ~ poisson_log(eta); \n")
  # Ordinal families need an explicit observation loop.
  expect_match(stan_llh(family = "cumulative", link = "logit"), fixed = TRUE,
               " Y[n] ~ ordered_logistic(eta[n], b_Intercept); \n")
})
test_that("Test that stan_llh returns correct llhs under weights and censoring", {
  # Weighted models store per-observation log-densities in lp_pre so they
  # can be multiplied by the weights before being added to the target.
  expect_equal(stan_llh(family = "cauchy", link = "inverse", weights = TRUE),
               " lp_pre[n] <- cauchy_log(Y[n], eta[n], sigma); \n")
  expect_equal(stan_llh(family = "poisson", link = "log", weights = TRUE),
               " lp_pre[n] <- poisson_log_log(Y[n], eta[n]); \n")
  # Censored models cannot use the compound poisson_log form and must
  # apply the link function explicitly.
  expect_match(stan_llh(family = "poisson", link = "log", cens = TRUE),
               "Y[n] ~ poisson(exp(eta[n])); \n", fixed = TRUE)
  # Addition argument trials[] enters the binomial likelihood.
  expect_equal(stan_llh(family = "binomial", link = "logit", add = TRUE, weights = TRUE),
               " lp_pre[n] <- binomial_logit_log(Y[n], trials[n], eta[n]); \n")
  # Right-censored observations contribute through the ccdf; with weights
  # the contribution is additionally scaled by weights[n].
  expect_match(stan_llh(family = "weibull", link = "inverse", cens = TRUE), fixed = TRUE,
               "increment_log_prob(weibull_ccdf_log(Y[n], shape, eta[n])); \n")
  expect_match(stan_llh(family = "weibull", link = "inverse", cens = TRUE, weights = TRUE), fixed = TRUE,
               "increment_log_prob(weights[n] * weibull_ccdf_log(Y[n], shape, eta[n])); \n")
})
test_that("Test that stan_rngprior returns correct sampling statements for priors", {
  # Expected header comments for the parameters and model blocks.
  c1 <- " # parameters to store prior samples \n"
  c2 <- " # additionally draw samples from priors \n"
  # Bounded parameters (nu, delta, sigma, sd) cannot use _rng functions and
  # are sampled via extra parameters in the par/model blocks instead.
  expect_equal(stan_rngprior(TRUE, prior = "nu ~ uniform(0,100); \n"),
               list(par = paste0(c1," real<lower=0> prior_nu; \n"),
                    model = paste0(c2," prior_nu ~ uniform(0,100); \n")))
  expect_equal(stan_rngprior(TRUE, prior = "delta ~ normal(0,1); \n", family = "cumulative"),
               list(par = paste0(c1," real<lower=0> prior_delta; \n"),
                    model = paste0(c2," prior_delta ~ normal(0,1); \n")))
  # Unbounded parameters (b, bp) can be drawn directly with _rng calls in
  # the generated-quantities block (genD declares, genC draws).
  expect_equal(stan_rngprior(TRUE, prior = "b ~ normal(0,5); \n"),
               list(genD = " real prior_b; \n",
                    genC = paste0(c2," prior_b <- normal_rng(0,5); \n")))
  # Indexed parameters get the index appended to the prior sample's name.
  expect_equal(stan_rngprior(TRUE, prior = "b[1] ~ normal(0,5); \n"),
               list(genD = " real prior_b_1; \n",
                    genC = paste0(c2," prior_b_1 <- normal_rng(0,5); \n")))
  expect_equal(stan_rngprior(TRUE, prior = "bp[1] ~ normal(0,5); \n"),
               list(genD = " real prior_bp_1; \n",
                    genC = paste0(c2," prior_bp_1 <- normal_rng(0,5); \n")))
  expect_equal(stan_rngprior(TRUE, prior = "sigma[2] ~ normal(0,5); \n"),
               list(par = paste0(c1," real<lower=0> prior_sigma_2; \n"),
                    model = paste0(c2," prior_sigma_2 ~ normal(0,5); \n")))
  # Multiple statements in one prior string are handled jointly.
  expect_equal(stan_rngprior(TRUE, prior = "sd_1[1] ~ normal(0,5); \n sd_1[2] ~ cauchy(0,2); \n"),
               list(par = paste0(c1," real<lower=0> prior_sd_1_1; \n real<lower=0> prior_sd_1_2; \n"),
                    model = paste0(c2," prior_sd_1_1 ~ normal(0,5); \n prior_sd_1_2 ~ cauchy(0,2); \n")))
})
test_that("Test that stan_functions returns correct user defined functions", {
  # A correlated random effect combined with a custom covariance matrix
  # requires the user-defined Stan helpers kronecker_cholesky() and
  # to_array() to appear in the generated functions block.
  model_code <- stan_model(
    rating ~ treat + period + carry + (1+carry|subject),
    data = inhaler,
    cov.ranef = list(subject = 1)
  )
  expect_match(model_code, "matrix kronecker_cholesky.*vector\\[\\] to_array")
})
|
### data camp course ####
# topic: machine learning with tree based models
# part 2: regression trees
# data set: grade.csv - predicting students' final grades with a regression tree
# (the original header mentioned creditsub/loan defaults, which belongs to the
# classification part of the course, not to this script)
packages <- c("rpart", "tidyverse", "caret", "rpart.plot", "Metrics")

# Install any package that is missing, then attach it.
package.check <- lapply(packages, FUN = function(pkg) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg, dependencies = TRUE)
    library(pkg, character.only = TRUE)
  }
})
help(package = "Metrics")

grade <- read.csv("grade.csv")
# Look/explore the data
str(grade)

# Randomly assign rows to ids (1/2/3 represents train/valid/test)
# This will generate a vector of ids of length equal to the number of rows
# The train/valid/test split will be approximately 70% / 15% / 15%
set.seed(1)
assignment <- sample(1:3, size = nrow(grade), prob = c(0.7, 0.15, 0.15), replace = TRUE)

# Create a train, validation and test set from the original data frame
grade_train <- grade[assignment == 1, ]  # training rows
grade_valid <- grade[assignment == 2, ]  # validation rows (kept for later tuning)
grade_test <- grade[assignment == 3, ]   # test rows

# Train the regression tree (method = "anova" fits a continuous response)
grade_model <- rpart(formula = final_grade ~ .,
                     data = grade_train,
                     method = "anova")

# Look at the model output
print(grade_model)

# Plot the tree model
rpart.plot(x = grade_model, yesno = 2, type = 0, extra = 0)

# Generate predictions on the held-out test set
pred <- predict(object = grade_model,  # model object
                newdata = grade_test)  # test dataset
table(pred)

# Compute the RMSE on the test set (Metrics::rmse)
rmse(actual = grade_test$final_grade,
     predicted = pred)
|
/tree based models/regression trees.R
|
no_license
|
balramsidh/Data-Projects
|
R
| false
| false
| 1,727
|
r
|
### data camp course ####
# topic: machine learning with tree based models
# part 2: regression trees
# data set: grade.csv - predicting students' final grades with a regression tree
# (the original header mentioned creditsub/loan defaults, which belongs to the
# classification part of the course, not to this script)
packages <- c("rpart", "tidyverse", "caret", "rpart.plot", "Metrics")

# Install any package that is missing, then attach it.
package.check <- lapply(packages, FUN = function(pkg) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg, dependencies = TRUE)
    library(pkg, character.only = TRUE)
  }
})
help(package = "Metrics")

grade <- read.csv("grade.csv")
# Look/explore the data
str(grade)

# Randomly assign rows to ids (1/2/3 represents train/valid/test)
# This will generate a vector of ids of length equal to the number of rows
# The train/valid/test split will be approximately 70% / 15% / 15%
set.seed(1)
assignment <- sample(1:3, size = nrow(grade), prob = c(0.7, 0.15, 0.15), replace = TRUE)

# Create a train, validation and test set from the original data frame
grade_train <- grade[assignment == 1, ]  # training rows
grade_valid <- grade[assignment == 2, ]  # validation rows (kept for later tuning)
grade_test <- grade[assignment == 3, ]   # test rows

# Train the regression tree (method = "anova" fits a continuous response)
grade_model <- rpart(formula = final_grade ~ .,
                     data = grade_train,
                     method = "anova")

# Look at the model output
print(grade_model)

# Plot the tree model
rpart.plot(x = grade_model, yesno = 2, type = 0, extra = 0)

# Generate predictions on the held-out test set
pred <- predict(object = grade_model,  # model object
                newdata = grade_test)  # test dataset
table(pred)

# Compute the RMSE on the test set (Metrics::rmse)
rmse(actual = grade_test$final_grade,
     predicted = pred)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LibOPF.R
\name{opf_check}
\alias{opf_check}
\title{Checks the OPF file for proper formatting purposes}
\usage{
opf_check(file)
}
\arguments{
\item{file}{The text OPF file name}
}
\value{
`NULL`
}
\description{
Checks the OPF file for proper formatting purposes
}
\details{
usage opf_check <input ASCII file in the LibOPF format>:
Note that the input file for opf_check must be a text file.
Use opf2txt to convert your OPF binary file into a text file.
}
\examples{
dataset <- opf_read_subGraph(system.file("extdata/boat.dat",package = "LibOPF"))
File <- file.path(tempdir(), "boat.txt")
opf2txt(dataset,File)
opf_check(File)
}
|
/man/opf_check.Rd
|
no_license
|
RafaelJM/LibOPF-in-R
|
R
| false
| true
| 706
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LibOPF.R
\name{opf_check}
\alias{opf_check}
\title{Checks the OPF file for proper formatting purposes}
\usage{
opf_check(file)
}
\arguments{
\item{file}{The text OPF file name}
}
\value{
`NULL`
}
\description{
Checks the OPF file for proper formatting purposes
}
\details{
usage opf_check <input ASCII file in the LibOPF format>:
Note that the input file for opf_check must be a text file.
Use opf2txt to convert your OPF binary file into a text file.
}
\examples{
dataset <- opf_read_subGraph(system.file("extdata/boat.dat",package = "LibOPF"))
File <- file.path(tempdir(), "boat.txt")
opf2txt(dataset,File)
opf_check(File)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.